Commit
[Improvement] Use Pylint to polish code style (open-mmlab#908)
* polish

* polish

* polish tools

* polish tests

* polish demo

* remove topk class
dreamerlin authored Jun 24, 2021
1 parent a29e4aa commit 79fba89
Showing 61 changed files with 390 additions and 386 deletions.
4 changes: 3 additions & 1 deletion .pylintrc
@@ -60,7 +60,9 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
-disable=print-statement,
+disable=import-outside-toplevel,
+        redefined-outer-name,
+        print-statement,
        parameter-unpacking,
        unpacking-in-except,
        old-raise-syntax,
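
Note: the two newly disabled checkers are pylint's import-outside-toplevel (C0415), which flags imports placed inside functions, and redefined-outer-name (W0621), which flags an inner name shadowing a binding from an enclosing scope. A minimal sketch of code that would trigger both (hypothetical example, not from this repository):

    frames = []

    def load(frames):  # W0621: ``frames`` shadows the outer name
        import json  # C0415: import inside a function body
        return [json.loads(f) for f in frames]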
2 changes: 1 addition & 1 deletion demo/demo_gradcam.py
@@ -78,7 +78,7 @@ def build_inputs(model, video_path, use_frames=False):
    if osp.isfile(video_path) and use_frames:
        raise RuntimeError(
            f"'{video_path}' is a video file, not a rawframe directory")
-    elif osp.isdir(video_path) and not use_frames:
+    if osp.isdir(video_path) and not use_frames:
        raise RuntimeError(
            f"'{video_path}' is a rawframe directory, not a video file")

2 changes: 1 addition & 1 deletion demo/long_video_demo.py
@@ -215,7 +215,7 @@ def inference(model, data, args, frame_queue):

    if args.stride > 0:
        pred_stride = int(args.sample_length * args.stride)
-        for i in range(pred_stride):
+        for _ in range(pred_stride):
            frame_queue.popleft()

        # for case ``args.stride=0``
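
Note: renaming the unused loop counter to ``_`` silences pylint's unused-variable warning (W0612); the underscore conventionally marks a value that is deliberately ignored. A trivial sketch:

    from collections import deque

    frame_queue = deque([1, 2, 3, 4])
    for _ in range(2):  # the loop index itself is never used
        frame_queue.popleft()
    print(frame_queue)  # deque([3, 4])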
4 changes: 2 additions & 2 deletions demo/webcam_demo.py
@@ -77,7 +77,7 @@ def show_results():
    cur_time = time.time()
    while True:
        msg = 'Waiting for action ...'
-        ret, frame = camera.read()
+        _, frame = camera.read()
        frame_queue.append(np.array(frame[:, :, ::-1]))

        if len(result_queue) != 0:
@@ -93,7 +93,7 @@ def show_results():
                cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
                            FONTCOLOR, THICKNESS, LINETYPE)

-        elif len(text_info):
+        elif len(text_info) != 0:
            for location, text in text_info.items():
                cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
                            FONTCOLOR, THICKNESS, LINETYPE)
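
Note: the explicit comparison satisfies pylint's len-as-condition check (C1801), which flags a bare ``len(seq)`` used as a boolean. A small sketch of the accepted forms:

    text_info = {(10, 30): 'waving'}
    if len(text_info) != 0:  # explicit comparison, as in this commit
        print('have text to draw')
    if text_info:  # plain truthiness also passes C1801
        print('equivalent test')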
33 changes: 16 additions & 17 deletions demo/webcam_demo_spatiotemporal_det.py
@@ -223,7 +223,6 @@ def _do_detect(self, image):
        The format of bboxes is (xmin, ymin, xmax, ymax) in pixels.
        """
-        pass

    def predict(self, task):
        """Add keyframe bboxes to task."""
@@ -571,20 +570,20 @@ def __next__(self):
        if self.read_queue.qsize() == 0:
            time.sleep(0.02)
            return not self.stopped, None
-        else:
-            was_read, task = self.read_queue.get()
-            if not was_read:
-                # If we reach the end of the video, there aren't enough frames
-                # in the task.processed_frames, so no need to model inference
-                # and draw predictions. Put task into display queue.
-                with self.read_id_lock:
-                    read_id = self.read_id
-                with self.display_lock:
-                    self.display_queue[read_id] = was_read, copy.deepcopy(task)
-
-                # main thread doesn't need to handle this task again
-                task = None
-            return was_read, task
+
+        was_read, task = self.read_queue.get()
+        if not was_read:
+            # If we reach the end of the video, there aren't enough frames
+            # in the task.processed_frames, so no need to model inference
+            # and draw predictions. Put task into display queue.
+            with self.read_id_lock:
+                read_id = self.read_id
+            with self.display_lock:
+                self.display_queue[read_id] = was_read, copy.deepcopy(task)
+
+            # main thread doesn't need to handle this task again
+            task = None
+        return was_read, task

    def start(self):
        """Start read thread and display thread."""
@@ -685,9 +684,9 @@ def draw_clip_range(self, frames, preds, bboxes, draw_range):
    @abstractmethod
    def draw_one_image(self, frame, bboxes, preds):
        """Draw bboxes and corresponding texts on one frame."""
-        pass

-    def abbrev(self, name):
+    @staticmethod
+    def abbrev(name):
        """Get the abbreviation of label name:

        'take (an object) from (a person)' -> 'take ... from ...'
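
Note: two recurring pylint fixes appear in this file: unnecessary-pass (W0107), since a docstring is already a valid function body, and no-self-use (R0201), which suggests turning a method that never touches ``self`` into a staticmethod. A compact sketch with a hypothetical class:

    class BaseVisualizer:
        """Hypothetical class illustrating both fixes."""

        def draw_one_image(self, frame):
            """The docstring alone is a valid body; ``pass`` is redundant."""

        @staticmethod
        def abbrev(name):
            """``self`` is unused, so a staticmethod avoids R0201."""
            return name if len(name) < 15 else name[:12] + '...'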
4 changes: 2 additions & 2 deletions mmaction/apis/train.py
@@ -225,5 +225,5 @@ def train_model(model,

        eval_res = test_dataset.evaluate(outputs, **eval_cfg)
        runner.logger.info(f'Testing results of the {name} checkpoint')
-        for name, val in eval_res.items():
-            runner.logger.info(f'{name}: {val:.04f}')
+        for metric_name, val in eval_res.items():
+            runner.logger.info(f'{metric_name}: {val:.04f}')
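
Note: reusing ``name`` as the loop variable would shadow the ``name`` interpolated in the log line just above it, which pylint reports as redefined-outer-name (W0621); ``metric_name`` keeps the two bindings distinct. A minimal sketch:

    eval_res = {'top1_acc': 0.7612, 'top5_acc': 0.9228}
    name = 'best'
    # Naming the loop variable ``name`` would shadow the outer binding.
    for metric_name, val in eval_res.items():
        print(f'{name} checkpoint {metric_name}: {val:.04f}')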
32 changes: 16 additions & 16 deletions mmaction/core/bbox/transforms.py
@@ -15,22 +15,22 @@ def bbox2result(bboxes, labels, num_classes, thr=0.01):
"""
if bboxes.shape[0] == 0:
return list(np.zeros((num_classes - 1, 0, 5), dtype=np.float32))
else:
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()

# We only handle multilabel now
assert labels.shape[-1] > 1
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()

scores = labels # rename for clarification
thr = (thr, ) * num_classes if isinstance(thr, float) else thr
assert scores.shape[1] == num_classes
assert len(thr) == num_classes
# We only handle multilabel now
assert labels.shape[-1] > 1

result = []
for i in range(num_classes - 1):
where = scores[:, i + 1] > thr[i + 1]
result.append(
np.concatenate((bboxes[where, :4], scores[where, i + 1:i + 2]),
axis=1))
return result
scores = labels # rename for clarification
thr = (thr, ) * num_classes if isinstance(thr, float) else thr
assert scores.shape[1] == num_classes
assert len(thr) == num_classes

result = []
for i in range(num_classes - 1):
where = scores[:, i + 1] > thr[i + 1]
result.append(
np.concatenate((bboxes[where, :4], scores[where, i + 1:i + 2]),
axis=1))
return result
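
Note: like the ``__next__`` refactor above, this follows pylint's no-else-return rule (R1705): when the ``if`` branch always returns, the ``else`` wrapper only adds an indent level. A minimal sketch:

    def bucket(count):
        if count == 0:
            return 'empty'
        # R1705: no ``else`` needed after a return; dedent instead.
        return 'non-empty'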
7 changes: 4 additions & 3 deletions mmaction/core/evaluation/ava_evaluation/np_box_list.py
@@ -120,8 +120,9 @@ def get_coordinates(self):
        x_max = box_coordinates[:, 3]
        return [y_min, x_min, y_max, x_max]

-    def _is_valid_boxes(self, data):
-        """Check whether data fullfills the format of N*[ymin, xmin, ymax,
+    @staticmethod
+    def _is_valid_boxes(data):
+        """Check whether data fulfills the format of N*[ymin, xmin, ymax,
        xmin].

        Args:
@@ -131,7 +132,7 @@ def _is_valid_boxes(self, data):
        a boolean indicating whether all ymax of boxes are equal or greater
        than ymin, and all xmax of boxes are equal or greater than xmin.
        """
-        if len(data):
+        if len(data) != 0:
            for v in data:
                if v[0] > v[2] or v[1] > v[3]:
                    return False
mmaction/core/evaluation/ava_evaluation/object_detection_evaluation.py
@@ -29,6 +29,7 @@

import collections
import logging
+import warnings
from abc import ABCMeta, abstractmethod
from collections import defaultdict

@@ -101,15 +102,13 @@ def clear(self):
class ObjectDetectionEvaluator(DetectionEvaluator):
"""A class to evaluate detections."""

-    def __init__(
-        self,
-        categories,
-        matching_iou_threshold=0.5,
-        evaluate_corlocs=False,
-        metric_prefix=None,
-        use_weighted_mean_ap=False,
-        evaluate_masks=False,
-    ):
+    def __init__(self,
+                 categories,
+                 matching_iou_threshold=0.5,
+                 evaluate_corlocs=False,
+                 metric_prefix=None,
+                 use_weighted_mean_ap=False,
+                 evaluate_masks=False):
"""Constructor.
Args:
@@ -244,7 +243,8 @@ def add_single_detected_image_info(self, image_id, detections_dict):
            detected_masks=detection_masks,
        )

-    def create_category_index(self, categories):
+    @staticmethod
+    def create_category_index(categories):
        """Creates dictionary of COCO compatible categories keyed by category
        id.
@@ -277,14 +277,8 @@ def evaluate(self):
        2. per_category_ap: category specific results with keys of the form
           'PerformanceByCategory/mAP@<matching_iou_threshold>IOU/category'
        """
-        (
-            per_class_ap,
-            mean_ap,
-            _,
-            _,
-            per_class_corloc,
-            mean_corloc,
-        ) = self._evaluation.evaluate()
+        (per_class_ap, mean_ap, _, _, per_class_corloc,
+         mean_corloc) = self._evaluation.evaluate()

        metric = f'mAP@{self._matching_iou_threshold}IOU'
        pascal_metrics = {self._metric_prefix + metric: mean_ap}
@@ -355,15 +349,13 @@ def __init__(self, categories, matching_iou_threshold=0.5):
class ObjectDetectionEvaluation:
    """Internal implementation of Pascal object detection metrics."""

-    def __init__(
-        self,
-        num_groundtruth_classes,
-        matching_iou_threshold=0.5,
-        nms_iou_threshold=1.0,
-        nms_max_output_boxes=10000,
-        use_weighted_mean_ap=False,
-        label_id_offset=0,
-    ):
+    def __init__(self,
+                 num_groundtruth_classes,
+                 matching_iou_threshold=0.5,
+                 nms_iou_threshold=1.0,
+                 nms_max_output_boxes=10000,
+                 use_weighted_mean_ap=False,
+                 label_id_offset=0):
        if num_groundtruth_classes < 1:
            raise ValueError(
                'Need at least 1 groundtruth class for evaluation.')
@@ -399,13 +391,11 @@ def _initialize_detections(self):
    def clear_detections(self):
        self._initialize_detections()

-    def add_single_ground_truth_image_info(
-        self,
-        image_key,
-        groundtruth_boxes,
-        groundtruth_class_labels,
-        groundtruth_masks=None,
-    ):
+    def add_single_ground_truth_image_info(self,
+                                           image_key,
+                                           groundtruth_boxes,
+                                           groundtruth_class_labels,
+                                           groundtruth_masks=None):
"""Adds groundtruth for a single image to be used for evaluation.
Args:
@@ -420,8 +410,8 @@ def add_single_ground_truth_image_info(
                masks. The mask values range from 0 to 1.
        """
        if image_key in self.groundtruth_boxes:
-            logging.warn(('image %s has already been added to the ground '
-                          'truth database.'), image_key)
+            warnings.warn(('image %s has already been added to the ground '
+                           'truth database.'), image_key)
            return

        self.groundtruth_boxes[image_key] = groundtruth_boxes
@@ -430,14 +420,12 @@ def add_single_ground_truth_image_info(

        self._update_ground_truth_statistics(groundtruth_class_labels)

-    def add_single_detected_image_info(
-        self,
-        image_key,
-        detected_boxes,
-        detected_scores,
-        detected_class_labels,
-        detected_masks=None,
-    ):
+    def add_single_detected_image_info(self,
+                                       image_key,
+                                       detected_boxes,
+                                       detected_scores,
+                                       detected_class_labels,
+                                       detected_masks=None):
"""Adds detections for a single image to be used for evaluation.
Args:
@@ -468,8 +456,8 @@ def add_single_detected_image_info(
        )

        if image_key in self.detection_keys:
-            logging.warn(('image %s has already been added to the ground '
-                          'truth database.'), image_key)
+            warnings.warn(('image %s has already been added to the ground '
+                           'truth database.'), image_key)
            return

        self.detection_keys.add(image_key)
@@ -536,8 +524,7 @@ def evaluate(self):
            logging.info(
                'The following classes have no ground truth examples: %s',
                np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
-                self.label_id_offset,
-            )
+                self.label_id_offset)

        if self.use_weighted_mean_ap:
            all_scores = np.array([], dtype=float)
Expand All @@ -557,10 +544,8 @@ def evaluate(self):
                all_scores = np.append(all_scores, scores)
                all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
            precision, recall = metrics.compute_precision_recall(
-                scores,
-                tp_fp_labels,
-                self.num_gt_instances_per_class[class_index],
-            )
+                scores, tp_fp_labels,
+                self.num_gt_instances_per_class[class_index])
            self.precisions_per_class.append(precision)
            self.recalls_per_class.append(recall)
            average_precision = metrics.compute_average_precision(
@@ -569,8 +554,7 @@

        self.corloc_per_class = metrics.compute_cor_loc(
            self.num_gt_imgs_per_class,
-            self.num_images_correctly_detected_per_class,
-        )
+            self.num_images_correctly_detected_per_class)

        if self.use_weighted_mean_ap:
            num_gt_instances = np.sum(self.num_gt_instances_per_class)
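
Note: pylint flags ``logging.warn`` as a deprecated alias (W1505), but the two replacements differ: ``logging.warning`` lazily %-formats extra positional arguments into the message, whereas ``warnings.warn`` interprets its second positional argument as the warning category, not a format argument, so the ``image_key`` passed here would not be interpolated. A sketch of both call styles:

    import logging
    import warnings

    image_key = 'frame_0001'
    # logging.warning supports lazy %-interpolation of extra arguments.
    logging.warning('image %s has already been added.', image_key)
    # warnings.warn takes (message, category, ...), so the message must
    # be fully built before the call.
    warnings.warn(f'image {image_key} has already been added.')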