diff --git a/sahi/annotation.py b/sahi/annotation.py
index 34d07f1e3..f1780a5c4 100644
--- a/sahi/annotation.py
+++ b/sahi/annotation.py
@@ -111,11 +111,7 @@ def __repr__(self):
 class Mask:
     @classmethod
     def from_float_mask(
-        cls,
-        mask,
-        full_shape=None,
-        mask_threshold: float = 0.5,
-        shift_amount: list = [0, 0],
+        cls, mask, full_shape=None, mask_threshold: float = 0.5, shift_amount: list = [0, 0],
     ):
         """
         Args:
@@ -130,18 +126,11 @@ def from_float_mask(
             Size of the full image after shifting, should be in the form of [height, width]
         """
         bool_mask = mask > mask_threshold
-        return cls(
-            bool_mask=bool_mask,
-            shift_amount=shift_amount,
-            full_shape=full_shape,
-        )
+        return cls(bool_mask=bool_mask, shift_amount=shift_amount, full_shape=full_shape,)

     @classmethod
     def from_coco_segmentation(
-        cls,
-        segmentation,
-        full_shape=None,
-        shift_amount: list = [0, 0],
+        cls, segmentation, full_shape=None, shift_amount: list = [0, 0],
     ):
         """
         Init Mask from coco segmentation representation.
@@ -163,17 +152,10 @@ def from_coco_segmentation(
         assert full_shape is not None, "full_shape must be provided"

         bool_mask = get_bool_mask_from_coco_segmentation(segmentation, height=full_shape[0], width=full_shape[1])
-        return cls(
-            bool_mask=bool_mask,
-            shift_amount=shift_amount,
-            full_shape=full_shape,
-        )
+        return cls(bool_mask=bool_mask, shift_amount=shift_amount, full_shape=full_shape,)

     def __init__(
-        self,
-        bool_mask=None,
-        full_shape=None,
-        shift_amount: list = [0, 0],
+        self, bool_mask=None, full_shape=None, shift_amount: list = [0, 0],
     ):
         """
         Args:
@@ -234,14 +216,7 @@ def get_shifted_mask(self):
         # Confirm full_shape is specified
         assert (self.full_shape_height is not None) and (self.full_shape_width is not None), "full_shape is None"
         # init full mask
-        mask_fullsized = np.full(
-            (
-                self.full_shape_height,
-                self.full_shape_width,
-            ),
-            0,
-            dtype="float32",
-        )
+        mask_fullsized = np.full((self.full_shape_height, self.full_shape_width,), 0, dtype="float32",)

         # arrange starting ending indexes
         starting_pixel = [self.shift_x, self.shift_y]
@@ -255,11 +230,7 @@ def get_shifted_mask(self):
             : ending_pixel[1] - starting_pixel[1], : ending_pixel[0] - starting_pixel[0]
         ]

-        return Mask(
-            mask_fullsized,
-            shift_amount=[0, 0],
-            full_shape=self.full_shape,
-        )
+        return Mask(mask_fullsized, shift_amount=[0, 0], full_shape=self.full_shape,)

     def to_coco_segmentation(self):
         """
@@ -470,10 +441,7 @@ def from_shapely_annotation(

     @classmethod
     def from_imantics_annotation(
-        cls,
-        annotation,
-        shift_amount: Optional[List[int]] = [0, 0],
-        full_shape: Optional[List[int]] = None,
+        cls, annotation, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
     ):
         """
         Creates ObjectAnnotation from imantics.annotation.Annotation
@@ -527,18 +495,11 @@ def __init__(
             self.mask = None
             self.bbox = BoundingBox(bbox, shift_amount)
         else:
-            self.mask = Mask(
-                bool_mask=bool_mask,
-                shift_amount=shift_amount,
-                full_shape=full_shape,
-            )
+            self.mask = Mask(bool_mask=bool_mask, shift_amount=shift_amount, full_shape=full_shape,)
             bbox = get_bbox_from_bool_mask(bool_mask)
             self.bbox = BoundingBox(bbox, shift_amount)
         category_name = category_name if category_name else str(category_id)
-        self.category = Category(
-            id=category_id,
-            name=category_name,
-        )
+        self.category = Category(id=category_id, name=category_name,)

         self.merged = None

@@ -554,9 +515,7 @@ def to_coco_annotation(self):
             )
         else:
             coco_annotation = CocoAnnotation.from_coco_bbox(
-                bbox=self.bbox.to_coco_bbox(),
-                category_id=self.category.id,
-                category_name=self.category.name,
+                bbox=self.bbox.to_coco_bbox(), category_id=self.category.id, category_name=self.category.name,
             )
         return coco_annotation
@@ -573,10 +532,7 @@ def to_coco_prediction(self):
             )
         else:
             coco_prediction = CocoPrediction.from_coco_bbox(
-                bbox=self.bbox.to_coco_bbox(),
-                category_id=self.category.id,
-                category_name=self.category.name,
-                score=1,
+                bbox=self.bbox.to_coco_bbox(), category_id=self.category.id, category_name=self.category.name, score=1,
             )
         return coco_prediction
@@ -589,9 +545,7 @@ def to_shapely_annotation(self):
                 segmentation=self.mask.to_coco_segmentation(),
             )
         else:
-            shapely_annotation = ShapelyAnnotation.from_coco_bbox(
-                bbox=self.bbox.to_coco_bbox(),
-            )
+            shapely_annotation = ShapelyAnnotation.from_coco_bbox(bbox=self.bbox.to_coco_bbox(),)
         return shapely_annotation

     def to_imantics_annotation(self):
diff --git a/sahi/model.py b/sahi/model.py
index 91a1d1728..b793103d4 100644
--- a/sahi/model.py
+++ b/sahi/model.py
@@ -89,9 +89,7 @@ def perform_inference(self, image: np.ndarray, image_size: int = None):
         NotImplementedError()

     def _create_object_prediction_list_from_original_predictions(
-        self,
-        shift_amount: Optional[List[int]] = [0, 0],
-        full_shape: Optional[List[int]] = None,
+        self, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
     ):
         """
         This function should be implemented in a way that self._original_predictions should
@@ -119,9 +117,7 @@ def _apply_category_remapping(self):
                 object_prediction.category.id = new_category_id_int

     def convert_original_predictions(
-        self,
-        shift_amount: Optional[List[int]] = [0, 0],
-        full_shape: Optional[List[int]] = None,
+        self, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
     ):
         """
         Converts original predictions of the detection model to a list of
@@ -134,8 +130,7 @@
             Size of the full image after shifting, should be in the form of [height, width]
         """
         self._create_object_prediction_list_from_original_predictions(
-            shift_amount=shift_amount,
-            full_shape=full_shape,
+            shift_amount=shift_amount, full_shape=full_shape,
         )
         if self.category_remapping:
             self._apply_category_remapping()
@@ -148,9 +143,7 @@ def object_prediction_list(self):
     def original_predictions(self):
         return self._original_predictions

-    def _create_predictions_from_object_prediction_list(
-        object_prediction_list: List[ObjectPrediction],
-    ):
+    def _create_predictions_from_object_prediction_list(object_prediction_list: List[ObjectPrediction],):
         """
         This function should be implemented in a way that it converts a list of
         prediction.ObjectPrediction instance to detection model's original prediction format.
@@ -179,11 +172,7 @@ def load_model(self):
         from mmdet.apis import init_detector

         # set model
-        model = init_detector(
-            config=self.config_path,
-            checkpoint=self.model_path,
-            device=self.device,
-        )
+        model = init_detector(config=self.config_path, checkpoint=self.model_path, device=self.device,)
         self.model = model

         # set category_mapping
@@ -250,9 +239,7 @@ def category_names(self):
         return self.model.CLASSES

     def _create_object_prediction_list_from_original_predictions(
-        self,
-        shift_amount: Optional[List[int]] = [0, 0],
-        full_shape: Optional[List[int]] = None,
+        self, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
     ):
         """
         self._original_predictions is converted to a list of prediction.ObjectPrediction and set to
@@ -307,8 +294,7 @@ def _create_object_prediction_list_from_original_predictions(
         self._object_prediction_list = object_prediction_list

     def _create_original_predictions_from_object_prediction_list(
-        self,
-        object_prediction_list: List[ObjectPrediction],
+        self, object_prediction_list: List[ObjectPrediction],
     ):
         """
         Converts a list of prediction.ObjectPrediction instance to detection model's original prediction format.
@@ -426,9 +412,7 @@ def category_names(self):
         return self.model.names

     def _create_object_prediction_list_from_original_predictions(
-        self,
-        shift_amount: Optional[List[int]] = [0, 0],
-        full_shape: Optional[List[int]] = None,
+        self, shift_amount: Optional[List[int]] = [0, 0], full_shape: Optional[List[int]] = None,
     ):
         """
         self._original_predictions is converted to a list of prediction.ObjectPrediction and set to
@@ -472,8 +456,7 @@ def _create_object_prediction_list_from_original_predictions(
         self._object_prediction_list = object_prediction_list

     def _create_original_predictions_from_object_prediction_list(
-        self,
-        object_prediction_list: List[ObjectPrediction],
+        self, object_prediction_list: List[ObjectPrediction],
     ):
         """
         Converts a list of prediction.ObjectPrediction instance to detection model's original
diff --git a/sahi/postprocess/combine.py b/sahi/postprocess/combine.py
index 79ddc9780..40c79e443 100644
--- a/sahi/postprocess/combine.py
+++ b/sahi/postprocess/combine.py
@@ -45,10 +45,7 @@ class PostprocessPredictions:
     """Combines predictions using NMS elimination utilizing provided match metric ('IOU' or 'IOS')"""

     def __init__(
-        self,
-        match_threshold: float = 0.5,
-        match_metric: str = "IOU",
-        class_agnostic: bool = True,
+        self, match_threshold: float = 0.5, match_metric: str = "IOU", class_agnostic: bool = True,
     ):
         self.match_threshold = match_threshold
         self.class_agnostic = class_agnostic
@@ -100,8 +97,7 @@ def __call__(self):

 class NMSPostprocess(PostprocessPredictions):
     def __call__(
-        self,
-        object_predictions: List[ObjectPrediction],
+        self, object_predictions: List[ObjectPrediction],
     ):
         source_object_predictions: List[ObjectPrediction] = copy.deepcopy(object_predictions)
         selected_object_predictions: List[ObjectPrediction] = []
@@ -124,8 +120,7 @@ def __call__(

 class UnionMergePostprocess(PostprocessPredictions):
     def __call__(
-        self,
-        object_predictions: List[ObjectPrediction],
+        self, object_predictions: List[ObjectPrediction],
     ):
         source_object_predictions: List[ObjectPrediction] = copy.deepcopy(object_predictions)
         selected_object_predictions: List[ObjectPrediction] = []
@@ -150,11 +145,7 @@ def __call__(
             selected_object_predictions.append(selected_object_prediction)
         return selected_object_predictions

-    def _merge_object_prediction_pair(
-        self,
-        pred1: ObjectPrediction,
-        pred2: ObjectPrediction,
-    ) -> ObjectPrediction:
+    def _merge_object_prediction_pair(self, pred1: ObjectPrediction, pred2: ObjectPrediction,) -> ObjectPrediction:
         shift_amount = pred1.bbox.shift_amount
         merged_bbox: BoundingBox = self._get_merged_bbox(pred1, pred2)
         merged_score: float = self._get_merged_score(pred1, pred2)
@@ -191,10 +182,7 @@ def _get_merged_bbox(pred1: ObjectPrediction, pred2: ObjectPrediction) -> Boundi
         return bbox

     @staticmethod
-    def _get_merged_score(
-        pred1: ObjectPrediction,
-        pred2: ObjectPrediction,
-    ) -> float:
+    def _get_merged_score(pred1: ObjectPrediction, pred2: ObjectPrediction,) -> float:
         scores: List[float] = [pred.score.value for pred in (pred1, pred2)]
         return max(scores)

@@ -203,8 +191,4 @@ def _get_merged_mask(pred1: ObjectPrediction, pred2: ObjectPrediction) -> Mask:
         mask1 = pred1.mask
         mask2 = pred2.mask
         union_mask = np.logical_or(mask1.bool_mask, mask2.bool_mask)
-        return Mask(
-            bool_mask=union_mask,
-            full_shape=mask1.full_shape,
-            shift_amount=mask1.shift_amount,
-        )
+        return Mask(bool_mask=union_mask, full_shape=mask1.full_shape, shift_amount=mask1.shift_amount,)
diff --git a/sahi/postprocess/legacy/match.py b/sahi/postprocess/legacy/match.py
index 1b5717a7c..921af3cf8 100644
--- a/sahi/postprocess/legacy/match.py
+++ b/sahi/postprocess/legacy/match.py
@@ -46,9 +46,7 @@ class PredictionMatcher:
     """

     def __init__(
-        self,
-        threshold: float = 0.5,
-        scorer: Callable[[BoxArray, BoxArray], float] = box_ios,
+        self, threshold: float = 0.5, scorer: Callable[[BoxArray, BoxArray], float] = box_ios,
     ):
         self._threshold = threshold
         self._scorer = scorer
diff --git a/sahi/postprocess/legacy/merge.py b/sahi/postprocess/legacy/merge.py
index c36e46135..f32632ae8 100644
--- a/sahi/postprocess/legacy/merge.py
+++ b/sahi/postprocess/legacy/merge.py
@@ -77,8 +77,7 @@ def merge_batch(
         """
         if merge_type not in ["merge", "ensemble"]:
             raise ValueError(
-                'Unknown merge type. Supported types are ["merge", "ensemble"], got type: ',
-                merge_type,
+                'Unknown merge type. Supported types are ["merge", "ensemble"], got type: ', merge_type,
             )

         unions = matcher.find_matched_predictions(predictions, ignore_class_label)
@@ -116,11 +115,7 @@ def _store_merging_info(count, prediction, merge_type):
         else:
             prediction.merged = False

-    def _merge_pair(
-        self,
-        pred1: ObjectPrediction,
-        pred2: ObjectPrediction,
-    ) -> ObjectPrediction:
+    def _merge_pair(self, pred1: ObjectPrediction, pred2: ObjectPrediction,) -> ObjectPrediction:
         box1 = extract_box(pred1)
         box2 = extract_box(pred2)
         merged_box = list(self._merge_box(box1, box2))
@@ -159,11 +154,7 @@ def _assert_equal_labels(pred1: ObjectPrediction, pred2: ObjectPrediction):
     def _merge_box(self, box1: BoxArray, box2: BoxArray) -> BoxArray:
         return self._box_merger(box1, box2)

-    def _merge_score(
-        self,
-        pred1: ObjectPrediction,
-        pred2: ObjectPrediction,
-    ) -> float:
+    def _merge_score(self, pred1: ObjectPrediction, pred2: ObjectPrediction,) -> float:
         scores = [pred.score.value for pred in (pred1, pred2)]
         policy = self._score_merging_method
         if policy == ScoreMergingPolicy.SMALLER_SCORE:
@@ -185,11 +176,7 @@ def _merge_mask(pred1: ObjectPrediction, pred2: ObjectPrediction) -> Mask:
         mask1 = pred1.mask
         mask2 = pred2.mask
         union_mask = np.logical_or(mask1.bool_mask, mask2.bool_mask)
-        return Mask(
-            bool_mask=union_mask,
-            full_shape=mask1.full_shape,
-            shift_amount=mask1.shift_amount,
-        )
+        return Mask(bool_mask=union_mask, full_shape=mask1.full_shape, shift_amount=mask1.shift_amount,)

     def _validate_box_merger(self, box_merger: Callable):
         if box_merger.__name__ not in self.BOX_MERGERS:
diff --git a/sahi/predict.py b/sahi/predict.py
index db4e3ae41..ffe9cbf70 100644
--- a/sahi/predict.py
+++ b/sahi/predict.py
@@ -79,8 +79,7 @@ def get_prediction(
     time_start = time.time()
     # works only with 1 batch
     detection_model.convert_original_predictions(
-        shift_amount=shift_amount,
-        full_shape=full_shape,
+        shift_amount=shift_amount, full_shape=full_shape,
     )
     object_prediction_list: List[ObjectPrediction] = detection_model.object_prediction_list
     # filter out predictions with lower score
@@ -103,9 +102,7 @@

     if verbose == 1:
         print(
-            "Prediction performed in",
-            durations_in_seconds["prediction"],
-            "seconds.",
+            "Prediction performed in", durations_in_seconds["prediction"], "seconds.",
         )

     return PredictionResult(
@@ -227,10 +224,7 @@ def get_sliced_prediction(
             detection_model=detection_model,
             image_size=image_size,
             shift_amount=shift_amount_list[0],
-            full_shape=[
-                slice_image_result.original_image_height,
-                slice_image_result.original_image_width,
-            ],
+            full_shape=[slice_image_result.original_image_height, slice_image_result.original_image_width,],
         )
         object_prediction_list.extend(prediction_result.object_prediction_list)
     if num_slices > 1 and perform_standard_pred:
@@ -258,14 +252,10 @@

     if verbose == 2:
         print(
-            "Slicing performed in",
-            durations_in_seconds["slice"],
-            "seconds.",
+            "Slicing performed in", durations_in_seconds["slice"], "seconds.",
         )
         print(
-            "Prediction performed in",
-            durations_in_seconds["prediction"],
-            "seconds.",
+            "Prediction performed in", durations_in_seconds["prediction"], "seconds.",
         )

     # merge matching predictions
@@ -388,11 +378,7 @@ def predict(
        coco_json = []
    elif os.path.isdir(source):
        time_start = time.time()
-        image_path_list = list_files(
-            directory=source,
-            contains=[".jpg", ".jpeg", ".png"],
-            verbose=verbose,
-        )
+        image_path_list = list_files(directory=source, contains=[".jpg", ".jpeg", ".png"], verbose=verbose,)
         time_end = time.time() - time_start
         durations_in_seconds["list_files"] = time_end
     else:
@@ -557,25 +543,17 @@ def predict(
     # print prediction duration
     if verbose == 1:
         print(
-            "Model loaded in",
-            durations_in_seconds["model_load"],
-            "seconds.",
+            "Model loaded in", durations_in_seconds["model_load"], "seconds.",
         )
         print(
-            "Slicing performed in",
-            durations_in_seconds["slice"],
-            "seconds.",
+            "Slicing performed in", durations_in_seconds["slice"], "seconds.",
         )
         print(
-            "Prediction performed in",
-            durations_in_seconds["prediction"],
-            "seconds.",
+            "Prediction performed in", durations_in_seconds["prediction"], "seconds.",
         )
         if export_visual:
             print(
-                "Exporting performed in",
-                durations_in_seconds["export_files"],
-                "seconds.",
+                "Exporting performed in", durations_in_seconds["export_files"], "seconds.",
             )
@@ -727,19 +705,13 @@ def predict_fiftyone(
     # print prediction duration
     if verbose == 1:
         print(
-            "Model loaded in",
-            durations_in_seconds["model_load"],
-            "seconds.",
+            "Model loaded in", durations_in_seconds["model_load"], "seconds.",
         )
         print(
-            "Slicing performed in",
-            durations_in_seconds["slice"],
-            "seconds.",
+            "Slicing performed in", durations_in_seconds["slice"], "seconds.",
         )
         print(
-            "Prediction performed in",
-            durations_in_seconds["prediction"],
-            "seconds.",
+            "Prediction performed in", durations_in_seconds["prediction"], "seconds.",
         )

     # visualize results
@@ -747,11 +719,7 @@ def predict_fiftyone(
     session.dataset = dataset
     # Evaluate the predictions
     results = dataset.evaluate_detections(
-        model_type,
-        gt_field="ground_truth",
-        eval_key="eval",
-        iou=postprocess_match_threshold,
-        compute_mAP=True,
+        model_type, gt_field="ground_truth", eval_key="eval", iou=postprocess_match_threshold, compute_mAP=True,
     )
     # Get the 10 most common classes in the dataset
     counts = dataset.count_values("ground_truth.detections.label")
diff --git a/sahi/slicing.py b/sahi/slicing.py
index 103fde34f..41e666cfc 100644
--- a/sahi/slicing.py
+++ b/sahi/slicing.py
@@ -318,19 +318,12 @@ def slice_image(

         # create sliced image and append to sliced_image_result
         sliced_image = SlicedImage(
-            image=np.asarray(image_pil_slice),
-            coco_image=coco_image,
-            starting_pixel=[slice_bbox[0], slice_bbox[1]],
+            image=np.asarray(image_pil_slice), coco_image=coco_image, starting_pixel=[slice_bbox[0], slice_bbox[1]],
         )
         sliced_image_result.add_sliced_image(sliced_image)

     verboseprint(
-        "Num slices:",
-        n_ims,
-        "slice_height",
-        slice_height,
-        "slice_width",
-        slice_width,
+        "Num slices:", n_ims, "slice_height", slice_height, "slice_width", slice_width,
     )
     verboseprint("Time to slice", image, time.time() - t0, "seconds")

@@ -416,9 +409,7 @@ def slice_coco(

     # create and save coco dict
     coco_dict = create_coco_dict(
-        sliced_coco_images,
-        coco_dict["categories"],
-        ignore_negative_samples=ignore_negative_samples,
+        sliced_coco_images, coco_dict["categories"], ignore_negative_samples=ignore_negative_samples,
     )
     save_path = ""
     if output_coco_annotation_file_name and output_dir:
diff --git a/sahi/utils/coco.py b/sahi/utils/coco.py
index 2acf6df7b..505e306ee 100644
--- a/sahi/utils/coco.py
+++ b/sahi/utils/coco.py
@@ -35,11 +35,7 @@ def from_coco_category(cls, category):
             category: Dict
                 {"supercategory": "person", "id": 1, "name": "person"},
         """
-        return cls(
-            id=category["id"],
-            name=category["name"],
-            supercategory=category["supercategory"],
-        )
+        return cls(id=category["id"], name=category["name"], supercategory=category["supercategory"],)

     @property
     def json(self):
@@ -76,12 +72,7 @@ def from_coco_segmentation(cls, segmentation, category_id, category_name, iscrow
             iscrowd: int
                 0 or 1
         """
-        return cls(
-            segmentation=segmentation,
-            category_id=category_id,
-            category_name=category_name,
-            iscrowd=iscrowd,
-        )
+        return cls(segmentation=segmentation, category_id=category_id, category_name=category_name, iscrowd=iscrowd,)

     @classmethod
     def from_coco_bbox(cls, bbox, category_id, category_name, iscrowd=0):
@@ -98,12 +89,7 @@ def from_coco_bbox(cls, bbox, category_id, category_name, iscrowd=0):
             iscrowd: int
                 0 or 1
         """
-        return cls(
-            bbox=bbox,
-            category_id=category_id,
-            category_name=category_name,
-            iscrowd=iscrowd,
-        )
+        return cls(bbox=bbox, category_id=category_id, category_name=category_name, iscrowd=iscrowd,)

     @classmethod
     def from_coco_annotation_dict(cls, annotation_dict: Dict, category_name: Optional[str] = None):
@@ -125,18 +111,12 @@ def from_coco_annotation_dict(cls, annotation_dict: Dict, category_name: Optiona
             )
         else:
             return cls(
-                bbox=annotation_dict["bbox"],
-                category_id=annotation_dict["category_id"],
-                category_name=category_name,
+                bbox=annotation_dict["bbox"], category_id=annotation_dict["category_id"], category_name=category_name,
             )

     @classmethod
     def from_shapely_annotation(
-        cls,
-        shapely_annotation: ShapelyAnnotation,
-        category_id: int,
-        category_name: str,
-        iscrowd: int,
+        cls, shapely_annotation: ShapelyAnnotation, category_id: int, category_name: str, iscrowd: int,
     ):
         """
         Creates CocoAnnotation object from ShapelyAnnotation object.
@@ -147,24 +127,13 @@ def from_shapely_annotation(
             category_name (str): Category name of the annotation
             iscrowd (int): 0 or 1
         """
-        coco_annotation = cls(
-            bbox=[0, 0, 0, 0],
-            category_id=category_id,
-            category_name=category_name,
-            iscrowd=iscrowd,
-        )
+        coco_annotation = cls(bbox=[0, 0, 0, 0], category_id=category_id, category_name=category_name, iscrowd=iscrowd,)
         coco_annotation._segmentation = shapely_annotation.to_coco_segmentation()
         coco_annotation._shapely_annotation = shapely_annotation
         return coco_annotation

     def __init__(
-        self,
-        segmentation=None,
-        bbox=None,
-        category_id=None,
-        category_name=None,
-        image_id=None,
-        iscrowd=0,
+        self, segmentation=None, bbox=None, category_id=None, category_name=None, image_id=None, iscrowd=0,
     ):
         """
         Creates coco annotation object using bbox or segmentation
@@ -391,14 +360,7 @@ def from_coco_annotation_dict(cls, category_name, annotation_dict, score, image_
         )

     def __init__(
-        self,
-        segmentation=None,
-        bbox=None,
-        category_id=None,
-        category_name=None,
-        image_id=None,
-        score=None,
-        iscrowd=0,
+        self, segmentation=None, bbox=None, category_id=None, category_name=None, image_id=None, score=None, iscrowd=0,
     ):
         """

@@ -463,14 +425,7 @@ class CocoVidAnnotation(CocoAnnotation):
     """

     def __init__(
-        self,
-        bbox=None,
-        category_id=None,
-        category_name=None,
-        image_id=None,
-        instance_id=None,
-        iscrowd=0,
-        id=None,
+        self, bbox=None, category_id=None, category_name=None, image_id=None, instance_id=None, iscrowd=0, id=None,
     ):
         """
         Args:
@@ -490,11 +445,7 @@ def __init__(
             Annotation id
         """
         super(CocoVidAnnotation, self).__init__(
-            bbox=bbox,
-            category_id=category_id,
-            category_name=category_name,
-            image_id=image_id,
-            iscrowd=iscrowd,
+            bbox=bbox, category_id=category_id, category_name=category_name, image_id=image_id, iscrowd=iscrowd,
         )
         self.instance_id = instance_id
         self.id = id
@@ -598,13 +549,7 @@ class CocoVidImage(CocoImage):
     """

     def __init__(
-        self,
-        file_name,
-        height,
-        width,
-        video_id=None,
-        frame_id=None,
-        id=None,
+        self, file_name, height, width, video_id=None, frame_id=None, id=None,
): """ Creates CocoVidImage object @@ -686,12 +631,7 @@ class CocoVideo: """ def __init__( - self, - name: str, - id: int = None, - fps: float = None, - height: int = None, - width: int = None, + self, name: str, id: int = None, fps: float = None, height: int = None, width: int = None, ): """ Creates CocoVideo object @@ -759,11 +699,7 @@ def __repr__(self): class Coco: def __init__( - self, - name=None, - image_dir=None, - remapping_dict=None, - ignore_negative_samples=False, + self, name=None, image_dir=None, remapping_dict=None, ignore_negative_samples=False, ): """ Creates Coco object. @@ -933,8 +869,7 @@ def merge(self, coco, desired_name2id=None, verbose=1): # print categories if verbose: print( - "Categories are formed as:\n", - self.json_categories, + "Categories are formed as:\n", self.json_categories, ) @classmethod @@ -964,11 +899,7 @@ def from_coco_dict_or_path( category_mapping: dict """ # init coco object - coco = cls( - image_dir=image_dir, - remapping_dict=remapping_dict, - ignore_negative_samples=ignore_negative_samples, - ) + coco = cls(image_dir=image_dir, remapping_dict=remapping_dict, ignore_negative_samples=ignore_negative_samples,) assert ( type(coco_dict_or_path) == str or type(coco_dict_or_path) == dict @@ -1035,9 +966,7 @@ def category_mapping(self): @property def json(self): return create_coco_dict( - images=self.images, - categories=self.json_categories, - ignore_negative_samples=self.ignore_negative_samples, + images=self.images, categories=self.json_categories, ignore_negative_samples=self.ignore_negative_samples, ) @property @@ -1143,10 +1072,7 @@ def split_coco_as_train_val(self, train_split_rate=0.9, numpy_seed=0): val_images = shuffled_images[num_train:] # form train val coco objects - train_coco = Coco( - name=self.name if self.name else "split" + "_train", - image_dir=self.image_dir, - ) + train_coco = Coco(name=self.name if self.name else "split" + "_train", image_dir=self.image_dir,) train_coco.images = train_images train_coco.categories = self.categories @@ -1197,10 +1123,7 @@ def export_as_yolov5(self, output_dir, train_split_rate=1, numpy_seed=0, mp=Fals # split dataset if split_mode == "TRAINVAL": - result = self.split_coco_as_train_val( - train_split_rate=train_split_rate, - numpy_seed=numpy_seed, - ) + result = self.split_coco_as_train_val(train_split_rate=train_split_rate, numpy_seed=numpy_seed,) train_coco = result["train_coco"] val_coco = result["val_coco"] elif split_mode == "TRAIN": @@ -1223,17 +1146,11 @@ def export_as_yolov5(self, output_dir, train_split_rate=1, numpy_seed=0, mp=Fals # create image symlinks and annotation txts if split_mode in ["TRAINVAL", "TRAIN"]: export_yolov5_images_and_txts_from_coco_object( - output_dir=train_dir, - coco=train_coco, - ignore_negative_samples=self.ignore_negative_samples, - mp=mp, + output_dir=train_dir, coco=train_coco, ignore_negative_samples=self.ignore_negative_samples, mp=mp, ) if split_mode in ["TRAINVAL", "VAL"]: export_yolov5_images_and_txts_from_coco_object( - output_dir=val_dir, - coco=val_coco, - ignore_negative_samples=self.ignore_negative_samples, - mp=mp, + output_dir=val_dir, coco=val_coco, ignore_negative_samples=self.ignore_negative_samples, mp=mp, ) # create yolov5 data yaml @@ -1331,8 +1248,7 @@ def export_yolov5_images_and_txts_from_coco_object(output_dir, coco, ignore_nega with Pool(processes=48) as pool: args = [(coco_image, coco.image_dir, output_dir, ignore_negative_samples) for coco_image in coco.images] pool.starmap( - export_single_yolov5_image_and_corresponding_txt, - 
+                export_single_yolov5_image_and_corresponding_txt, tqdm(args, total=len(args)),
             )
     else:
         for coco_image in tqdm(coco.images):
@@ -1376,8 +1292,7 @@ def export_single_yolov5_image_and_corresponding_txt(
     name_increment = 2
     while Path(yolo_image_path).is_file():
         yolo_image_path = yolo_image_path_temp.replace(
-            Path(coco_image.file_name).stem,
-            Path(coco_image.file_name).stem + "_" + str(name_increment),
+            Path(coco_image.file_name).stem, Path(coco_image.file_name).stem + "_" + str(name_increment),
         )
         name_increment += 1
     # create a symbolic link pointing to coco_image_path named yolo_image_path
@@ -1585,8 +1500,7 @@ def merge_from_list(coco_dict_list, desired_name2id=None, verbose=1):
     # print categories
     if verbose:
         print(
-            "Categories are formed as:\n",
-            merged_coco_dict["categories"],
+            "Categories are formed as:\n", merged_coco_dict["categories"],
         )
     return merged_coco_dict
@@ -1617,9 +1531,7 @@ def merge_from_file(coco_path1: str, coco_path2: str, save_path: str):
     save_json(merged_coco_dict, save_path)


-def get_imageid2annotationlist_mapping(
-    coco_dict: dict,
-) -> Dict[int, List[CocoAnnotation]]:
+def get_imageid2annotationlist_mapping(coco_dict: dict,) -> Dict[int, List[CocoAnnotation]]:
    """
    Get image_id to annotationlist mapping for faster indexing.
@@ -1726,11 +1638,7 @@ def create_coco_dict(images, categories, ignore_negative_samples=False):


 def split_coco_as_train_val(
-    coco_file_path_or_dict,
-    file_name=None,
-    target_dir=None,
-    train_split_rate=0.9,
-    numpy_seed=0,
+    coco_file_path_or_dict, file_name=None, target_dir=None, train_split_rate=0.9, numpy_seed=0,
 ):
     """
     Takes single coco dataset file path, split images into train-val and saves as seperate coco dataset files.
@@ -1824,10 +1732,7 @@ def split_coco_as_train_val(


 def add_bbox_and_area_to_coco(
-    source_coco_path: str = "",
-    target_coco_path: str = "",
-    add_bbox: bool = True,
-    add_area: bool = True,
+    source_coco_path: str = "", target_coco_path: str = "", add_bbox: bool = True, add_area: bool = True,
 ) -> dict:
     """
     Takes single coco dataset file path, calculates and fills bbox and area fields of the annotations
diff --git a/sahi/utils/cv.py b/sahi/utils/cv.py
index 7f95f47a9..eb5f8ffc0 100644
--- a/sahi/utils/cv.py
+++ b/sahi/utils/cv.py
@@ -37,16 +37,9 @@ def crop_object_predictions(
         category_id = object_prediction.category.id
         # crop detections
         # deepcopy crops so that original is not altered
-        cropped_img = copy.deepcopy(
-            image[
-                int(bbox[1]) : int(bbox[3]),
-                int(bbox[0]) : int(bbox[2]),
-                :,
-            ]
-        )
+        cropped_img = copy.deepcopy(image[int(bbox[1]) : int(bbox[3]), int(bbox[0]) : int(bbox[2]), :,])
         save_path = os.path.join(
-            output_dir,
-            file_name + "_box" + str(ind) + "_class" + str(category_id) + "." + export_format,
+            output_dir, file_name + "_box" + str(ind) + "_class" + str(category_id) + "." + export_format,
         )
         cv2.imwrite(save_path, cv2.cvtColor(cropped_img, cv2.COLOR_RGB2BGR))
@@ -185,11 +178,7 @@ def visualize_prediction(
         image = cv2.addWeighted(image, 1, rgb_mask, 0.7, 0)
     # visualize boxes
     cv2.rectangle(
-        image,
-        tuple(box[0:2]),
-        tuple(box[2:4]),
-        color=color,
-        thickness=rect_th,
+        image, tuple(box[0:2]), tuple(box[2:4]), color=color, thickness=rect_th,
     )
     # arange bounding box text location
     if box[1] - 10 > 10:
@@ -198,13 +187,7 @@ def visualize_prediction(
         box[1] += 10
     # add bounding box text
     cv2.putText(
-        image,
-        class_,
-        tuple(box[0:2]),
-        cv2.FONT_HERSHEY_SIMPLEX,
-        text_size,
-        color,
-        thickness=text_th,
+        image, class_, tuple(box[0:2]), cv2.FONT_HERSHEY_SIMPLEX, text_size, color, thickness=text_th,
     )
     if output_dir:
         # create output folder if not present
@@ -264,11 +247,7 @@ def visualize_object_predictions(
            image = cv2.addWeighted(image, 1, rgb_mask, 0.4, 0)
        # visualize boxes
        cv2.rectangle(
-            image,
-            tuple(bbox[0:2]),
-            tuple(bbox[2:4]),
-            color=color,
-            thickness=rect_th,
+            image, tuple(bbox[0:2]), tuple(bbox[2:4]), color=color, thickness=rect_th,
        )
        # arange bounding box text location
        if bbox[1] - 5 > 5:
@@ -278,13 +257,7 @@ def visualize_object_predictions(
        # add bounding box text
        label = "%s %.2f" % (category_name, score)
        cv2.putText(
-            image,
-            label,
-            tuple(bbox[0:2]),
-            cv2.FONT_HERSHEY_SIMPLEX,
-            text_size,
-            color,
-            thickness=text_th,
+            image, label, tuple(bbox[0:2]), cv2.FONT_HERSHEY_SIMPLEX, text_size, color, thickness=text_th,
        )
    if output_dir:
        # create output folder if not present
diff --git a/sahi/utils/fiftyone.py b/sahi/utils/fiftyone.py
index 476761471..782137e6d 100644
--- a/sahi/utils/fiftyone.py
+++ b/sahi/utils/fiftyone.py
@@ -26,17 +26,9 @@ def __init__(
         seed=None,
         max_samples=None,
     ):
-        data_path = self._parse_data_path(
-            dataset_dir=dataset_dir,
-            data_path=data_path,
-            default="data/",
-        )
+        data_path = self._parse_data_path(dataset_dir=dataset_dir, data_path=data_path, default="data/",)

-        labels_path = self._parse_labels_path(
-            dataset_dir=dataset_dir,
-            labels_path=labels_path,
-            default="labels.json",
-        )
+        labels_path = self._parse_labels_path(dataset_dir=dataset_dir, labels_path=labels_path, default="labels.json",)

         label_types = _parse_label_types(label_types)
@@ -44,10 +36,7 @@ def __init__(
             label_types.append("coco_id")

         super().__init__(
-            dataset_dir=dataset_dir,
-            shuffle=shuffle,
-            seed=seed,
-            max_samples=max_samples,
+            dataset_dir=dataset_dir, shuffle=shuffle, seed=seed, max_samples=max_samples,
         )

         self.data_path = data_path
@@ -71,13 +60,9 @@ def __init__(

     def setup(self):
         if self.labels_path is not None and os.path.isfile(self.labels_path):
-            (
-                info,
-                classes,
-                supercategory_map,
-                images,
-                annotations,
-            ) = load_coco_detection_annotations(self.labels_path, extra_attrs=self.extra_attrs)
+            (info, classes, supercategory_map, images, annotations,) = load_coco_detection_annotations(
+                self.labels_path, extra_attrs=self.extra_attrs
+            )

             if classes is not None:
                 info["classes"] = classes
diff --git a/sahi/utils/file.py b/sahi/utils/file.py
index a06754979..35d4f32d0 100644
--- a/sahi/utils/file.py
+++ b/sahi/utils/file.py
@@ -77,11 +77,7 @@ def load_json(load_path):
     return data


-def list_files(
-    directory: str,
-    contains: list = [".json"],
-    verbose: int = 1,
-) -> list:
+def list_files(directory: str, contains: list = [".json"], verbose: int = 1,) -> list:
     """
     Walk given directory and return a list of file path with desired extension

@@ -238,6 +234,5 @@ def download_from_url(from_url: str, to_path: str):

     if not path.exists(to_path):
         urllib.request.urlretrieve(
-            from_url,
-            to_path,
+            from_url, to_path,
         )
diff --git a/sahi/utils/mmdet.py b/sahi/utils/mmdet.py
index bec388bde..d4c2ed4c0 100644
--- a/sahi/utils/mmdet.py
+++ b/sahi/utils/mmdet.py
@@ -45,8 +45,7 @@ def download_mmdet_cascade_mask_rcnn_model(destination_path: Optional[str] = Non

     if not path.exists(destination_path):
         urllib.request.urlretrieve(
-            MmdetTestConstants.MMDET_CASCADEMASKRCNN_MODEL_URL,
-            destination_path,
+            MmdetTestConstants.MMDET_CASCADEMASKRCNN_MODEL_URL, destination_path,
         )

@@ -59,8 +58,7 @@ def download_mmdet_retinanet_model(destination_path: Optional[str] = None):

     if not path.exists(destination_path):
         urllib.request.urlretrieve(
-            MmdetTestConstants.MMDET_RETINANET_MODEL_URL,
-            destination_path,
+            MmdetTestConstants.MMDET_RETINANET_MODEL_URL, destination_path,
         )

@@ -108,8 +106,7 @@ def download_mmdet_config(

     # download main config file
     urllib.request.urlretrieve(
-        main_config_url,
-        main_config_path,
+        main_config_url, main_config_path,
     )

     # read main config file
@@ -130,8 +127,7 @@ def download_mmdet_config(

         # download secondary config files
         urllib.request.urlretrieve(
-            config_url,
-            str(config_path),
+            config_url, str(config_path),
         )

         # set final config dirs
@@ -165,7 +161,5 @@ def download_mmdet_config(

 if __name__ == "__main__":
     download_mmdet_config(
-        model_name="cascade_rcnn",
-        config_file_name="cascade_mask_rcnn_r50_fpn_1x_coco.py",
-        verbose=False,
+        model_name="cascade_rcnn", config_file_name="cascade_mask_rcnn_r50_fpn_1x_coco.py", verbose=False,
     )
diff --git a/sahi/utils/shapely.py b/sahi/utils/shapely.py
index 5a01c887d..67d155e6b 100644
--- a/sahi/utils/shapely.py
+++ b/sahi/utils/shapely.py
@@ -77,9 +77,7 @@ def from_coco_bbox(cls, bbox: List[int], slice_bbox: List[int] = None):
             slice_bbox (List[int]): [x_min, y_min, x_max, y_max]
                 Is used to calculate sliced coco coordinates.
""" - shapely_polygon = get_shapely_box( - x=bbox[0], y=bbox[1], width=bbox[2], height=bbox[3] - ) + shapely_polygon = get_shapely_box(x=bbox[0], y=bbox[1], width=bbox[2], height=bbox[3]) shapely_multipolygon = MultiPolygon([shapely_polygon]) return cls(multipolygon=shapely_multipolygon, slice_bbox=slice_bbox) @@ -163,11 +161,7 @@ def to_coco_segmentation(self): if coco_polygon[:2] == coco_polygon[-2:]: del coco_polygon[-2:] # append coco_polygon to coco_segmentation - coco_polygon = ( - [round(point) for point in coco_polygon] - if coco_polygon - else coco_polygon - ) + coco_polygon = [round(point) for point in coco_polygon] if coco_polygon else coco_polygon coco_segmentation.append(coco_polygon) return coco_segmentation @@ -190,10 +184,7 @@ def to_opencv_contours(self): miny = self.slice_bbox[1] x_coords = [x_coord - minx for x_coord in x_coords] y_coords = [y_coord - miny for y_coord in y_coords] - opencv_contour = [ - [[int(x_coords[ind]), int(y_coords[ind])]] - for ind in range(len(x_coords)) - ] + opencv_contour = [[[int(x_coords[ind]), int(y_coords[ind])]] for ind in range(len(x_coords))] else: opencv_contour: List = [] # append opencv_contour to opencv_contours @@ -295,8 +286,6 @@ def get_intersection(self, polygon: Polygon): else: intersection_multipolygon = MultiPolygon([]) # create shapely annotation from intersection multipolygon - intersection_shapely_annotation = ShapelyAnnotation( - intersection_multipolygon, slice_bbox - ) + intersection_shapely_annotation = ShapelyAnnotation(intersection_multipolygon, slice_bbox) return intersection_shapely_annotation diff --git a/sahi/utils/yolov5.py b/sahi/utils/yolov5.py index fffc5757c..4835196c4 100644 --- a/sahi/utils/yolov5.py +++ b/sahi/utils/yolov5.py @@ -21,6 +21,5 @@ def download_yolov5s6_model(destination_path: Optional[str] = None): if not path.exists(destination_path): urllib.request.urlretrieve( - Yolov5TestConstants.YOLOV5S6_MODEL_URL, - destination_path, + Yolov5TestConstants.YOLOV5S6_MODEL_URL, destination_path, ) diff --git a/scripts/coco2yolov5.py b/scripts/coco2yolov5.py index 637431e5a..39148b2c4 100644 --- a/scripts/coco2yolov5.py +++ b/scripts/coco2yolov5.py @@ -5,41 +5,24 @@ if __name__ == "__main__": parser = argparse.ArgumentParser() + parser.add_argument("--source", type=str, default="", help="directory for coco images") parser.add_argument( - "--source", type=str, default="", help="directory for coco images" + "--coco_file", type=str, default=None, help="file path for the coco file to be converted", ) parser.add_argument( - "--coco_file", - type=str, - default=None, - help="file path for the coco file to be converted", - ) - parser.add_argument( - "--train_split", - type=float, - default=0.9, - help="set the training split ratio", - ) - parser.add_argument( - "--project", default="runs/coco2yolov5", help="save results to project/name" + "--train_split", type=float, default=0.9, help="set the training split ratio", ) + parser.add_argument("--project", default="runs/coco2yolov5", help="save results to project/name") parser.add_argument("--name", default="exp", help="save results to project/name") - parser.add_argument( - "--seed", type=int, default=1, help="fix the seed for reproducibility" - ) + parser.add_argument("--seed", type=int, default=1, help="fix the seed for reproducibility") opt = parser.parse_args() # increment run save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=False)) # load coco dict - coco = Coco.from_coco_dict_or_path( - coco_dict_or_path=opt.coco_file, - 
-    )
+    coco = Coco.from_coco_dict_or_path(coco_dict_or_path=opt.coco_file, image_dir=opt.source,)

     # export as yolov5
     coco.export_as_yolov5(
-        output_dir=str(save_dir),
-        train_split_rate=opt.train_split,
-        numpy_seed=opt.seed,
+        output_dir=str(save_dir), train_split_rate=opt.train_split, numpy_seed=opt.seed,
     )
diff --git a/scripts/coco_error_analysis.py b/scripts/coco_error_analysis.py
index 9a4753937..f8c7fcce9 100644
--- a/scripts/coco_error_analysis.py
+++ b/scripts/coco_error_analysis.py
@@ -41,11 +41,7 @@ def makeplot(rs, ps, outDir, class_name, iou_type):
     for k in range(len(types)):
         ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)
         ax.fill_between(
-            rs,
-            ps_curve[k],
-            ps_curve[k + 1],
-            color=cs[k],
-            label=str(f"[{aps[k]:.3f}]" + types[k]),
+            rs, ps_curve[k], ps_curve[k + 1], color=cs[k], label=str(f"[{aps[k]:.3f}]" + types[k]),
         )
     plt.xlabel("recall")
     plt.ylabel("precision")
@@ -89,12 +85,7 @@ def makebarplot(rs, ps, outDir, class_name, iou_type):
         type_ps = ps[i, ..., 0]
         aps = [ps_.mean() for ps_ in type_ps.T]
         rects_list.append(
-            ax.bar(
-                x - width / 2 + (i + 1) * width / len(types),
-                aps,
-                width / len(types),
-                label=types[i],
-            )
+            ax.bar(x - width / 2 + (i + 1) * width / len(types), aps, width / len(types), label=types[i],)
         )

     # Add some text for labels, title and custom x-axis tick labels, etc.
@@ -324,11 +315,7 @@ def main():
     parser.add_argument("--types", type=str, nargs="+", default=["bbox"], help="result types")
     parser.add_argument("--extraplots", action="store_true", help="export extra bar/stat plots")
     parser.add_argument(
-        "--areas",
-        type=int,
-        nargs="+",
-        default=[1024, 9216, 10000000000],
-        help="area regions",
+        "--areas", type=int, nargs="+", default=[1024, 9216, 10000000000], help="area regions",
     )
     args = parser.parse_args()
diff --git a/scripts/predict.py b/scripts/predict.py
index 956887618..7426483ca 100644
--- a/scripts/predict.py
+++ b/scripts/predict.py
@@ -12,28 +12,16 @@
     help="mmdet for 'MmdetDetectionModel', 'yolov5' for 'Yolov5DetectionModel'",
 )
 parser.add_argument(
-    "--model_path",
-    type=str,
-    default="",
-    help="path for the model",
+    "--model_path", type=str, default="", help="path for the model",
 )
 parser.add_argument(
-    "--config_path",
-    type=str,
-    default="",
-    help="path for the model config",
+    "--config_path", type=str, default="", help="path for the model config",
 )
 parser.add_argument(
-    "--conf_thresh",
-    type=float,
-    default=0.25,
-    help="all predictions with score < conf_thresh will be discarded",
+    "--conf_thresh", type=float, default=0.25, help="all predictions with score < conf_thresh will be discarded",
 )
 parser.add_argument(
-    "--device",
-    type=str,
-    default=None,
-    help="cpu or cuda",
+    "--device", type=str, default=None, help="cpu or cuda",
 )
 parser.add_argument(
     "--category_mapping",
@@ -70,9 +58,7 @@
 parser.add_argument("--match_metric", type=str, default="IOS", help="match metric for postprocess: 'IOU' or 'IOS'")
 parser.add_argument("--match_thresh", type=float, default=0.5, help="match threshold for postprocess")
 parser.add_argument(
-    "--class_agnostic",
-    action="store_true",
-    help="Postprocess will ignore category ids.",
+    "--class_agnostic", action="store_true", help="Postprocess will ignore category ids.",
 )

 parser.add_argument("--visual_export_format", type=str, default="png")
diff --git a/scripts/predict_fiftyone.py b/scripts/predict_fiftyone.py
index a85abfe62..3fc5e7d65 100644
--- a/scripts/predict_fiftyone.py
+++ b/scripts/predict_fiftyone.py
@@ -18,28 +18,16 @@
help="mmdet for 'MmdetDetectionModel', 'yolov5' for 'Yolov5DetectionModel'", ) parser.add_argument( - "--model_path", - type=str, - default="", - help="path for the model", + "--model_path", type=str, default="", help="path for the model", ) parser.add_argument( - "--config_path", - type=str, - default="", - help="path for the model config", + "--config_path", type=str, default="", help="path for the model config", ) parser.add_argument( - "--conf_thresh", - type=float, - default=0.25, - help="all predictions with score < conf_thresh will be discarded", + "--conf_thresh", type=float, default=0.25, help="all predictions with score < conf_thresh will be discarded", ) parser.add_argument( - "--device", - type=str, - default=None, - help="cpu or cuda", + "--device", type=str, default=None, help="cpu or cuda", ) parser.add_argument( "--category_mapping", @@ -65,9 +53,7 @@ parser.add_argument("--match_metric", type=str, default="IOS", help="match metric for postprocess: 'IOU' or 'IOS'") parser.add_argument("--match_thresh", type=float, default=0.5, help="match threshold for postprocess") parser.add_argument( - "--class_agnostic", - action="store_true", - help="Postprocess will ignore category ids.", + "--class_agnostic", action="store_true", help="Postprocess will ignore category ids.", ) opt = parser.parse_args() diff --git a/scripts/slice_coco.py b/scripts/slice_coco.py index 9a0c1b6f1..269c0a853 100644 --- a/scripts/slice_coco.py +++ b/scripts/slice_coco.py @@ -9,10 +9,7 @@ if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( - "coco_json_path", - type=str, - default=None, - help="path to coco annotation json file", + "coco_json_path", type=str, default=None, help="path to coco annotation json file", ) parser.add_argument("coco_image_dir", type=str, default="", help="folder containing coco images") parser.add_argument("--slice_size", type=int, nargs="+", default=[512], help="slice size") @@ -57,6 +54,5 @@ output_coco_annotation_file_path = os.path.join(output_dir, sliced_coco_name + ".json") save_json(coco_dict, output_coco_annotation_file_path) print( - f"Sliced 'slice_size: {slice_size}' coco file is saved to", - output_coco_annotation_file_path, + f"Sliced 'slice_size: {slice_size}' coco file is saved to", output_coco_annotation_file_path, ) diff --git a/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn.py b/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn.py index bf77e00c8..033a88843 100644 --- a/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn.py +++ b/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn.py @@ -12,23 +12,14 @@ norm_eval=True, style="pytorch", ), - neck=dict( - type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5 - ), + neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type="RPNHead", in_channels=256, feat_channels=256, - anchor_generator=dict( - type="AnchorGenerator", - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64], - ), + anchor_generator=dict(type="AnchorGenerator", scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64],), bbox_coder=dict( - type="DeltaXYWHBBoxCoder", - target_means=[0.0, 0.0, 0.0, 0.0], - target_stds=[1.0, 1.0, 1.0, 1.0], + type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[1.0, 1.0, 1.0, 1.0], ), loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, 
@@ -51,14 +42,10 @@
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(
-                    type="DeltaXYWHBBoxCoder",
-                    target_means=[0.0, 0.0, 0.0, 0.0],
-                    target_stds=[0.1, 0.1, 0.2, 0.2],
+                    type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[0.1, 0.1, 0.2, 0.2],
                 ),
                 reg_class_agnostic=True,
-                loss_cls=dict(
-                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
-                ),
+                loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
                 loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
             ),
             dict(
@@ -68,14 +55,10 @@
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(
-                    type="DeltaXYWHBBoxCoder",
-                    target_means=[0.0, 0.0, 0.0, 0.0],
-                    target_stds=[0.05, 0.05, 0.1, 0.1],
+                    type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[0.05, 0.05, 0.1, 0.1],
                 ),
                 reg_class_agnostic=True,
-                loss_cls=dict(
-                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
-                ),
+                loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
                 loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
             ),
             dict(
@@ -90,9 +73,7 @@
                     target_stds=[0.033, 0.033, 0.067, 0.067],
                 ),
                 reg_class_agnostic=True,
-                loss_cls=dict(
-                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
-                ),
+                loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
                 loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
             ),
         ],
@@ -122,13 +103,7 @@
                 match_low_quality=True,
                 ignore_iof_thr=-1,
             ),
-            sampler=dict(
-                type="RandomSampler",
-                num=256,
-                pos_fraction=0.5,
-                neg_pos_ub=-1,
-                add_gt_as_proposals=False,
-            ),
+            sampler=dict(type="RandomSampler", num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False,),
             allowed_border=0,
             pos_weight=-1,
             debug=False,
@@ -152,11 +127,7 @@
                     ignore_iof_thr=-1,
                 ),
                 sampler=dict(
-                    type="RandomSampler",
-                    num=512,
-                    pos_fraction=0.25,
-                    neg_pos_ub=-1,
-                    add_gt_as_proposals=True,
+                    type="RandomSampler", num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True,
                 ),
                 mask_size=28,
                 pos_weight=-1,
@@ -172,11 +143,7 @@
                    ignore_iof_thr=-1,
                ),
                sampler=dict(
-                    type="RandomSampler",
-                    num=512,
-                    pos_fraction=0.25,
-                    neg_pos_ub=-1,
-                    add_gt_as_proposals=True,
+                    type="RandomSampler", num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True,
                ),
                mask_size=28,
                pos_weight=-1,
@@ -192,11 +159,7 @@
                    ignore_iof_thr=-1,
                ),
                sampler=dict(
-                    type="RandomSampler",
-                    num=512,
-                    pos_fraction=0.25,
-                    neg_pos_ub=-1,
-                    add_gt_as_proposals=True,
+                    type="RandomSampler", num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True,
                ),
                mask_size=28,
                pos_weight=-1,
@@ -213,11 +176,6 @@
             nms=dict(type="nms", iou_threshold=0.7),
             min_bbox_size=0,
         ),
-        rcnn=dict(
-            score_thr=0.05,
-            nms=dict(type="nms", iou_threshold=0.5),
-            max_per_img=100,
-            mask_thr_binary=0.5,
-        ),
+        rcnn=dict(score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5,),
     ),
 )
diff --git a/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py b/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
index fbdafcec5..8c747b1a3 100644
--- a/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
+++ b/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
@@ -1,5 +1 @@
-_base_ = [
-    'cascade_mask_rcnn_r50_fpn.py',
-    'coco_instance.py',
-    'schedule_1x.py', 'default_runtime.py'
-]
+_base_ = ["cascade_mask_rcnn_r50_fpn.py", "coco_instance.py", "schedule_1x.py", "default_runtime.py"]
diff --git a/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco_v280.py b/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco_v280.py
index 7aea99107..c2c341585 100644
--- a/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco_v280.py
+++ b/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco_v280.py
@@ -1,5 +1 @@
-_base_ = [
-    'cascade_mask_rcnn_r50_fpn_v280.py',
-    'coco_instance.py',
-    'schedule_1x.py', 'default_runtime.py'
-]
+_base_ = ["cascade_mask_rcnn_r50_fpn_v280.py", "coco_instance.py", "schedule_1x.py", "default_runtime.py"]
diff --git a/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_v280.py b/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_v280.py
index 8735a76af..f2ac9c894 100644
--- a/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_v280.py
+++ b/tests/data/models/mmdet_cascade_mask_rcnn/cascade_mask_rcnn_r50_fpn_v280.py
@@ -12,23 +12,14 @@
         norm_eval=True,
         style="pytorch",
     ),
-    neck=dict(
-        type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5
-    ),
+    neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
     rpn_head=dict(
         type="RPNHead",
         in_channels=256,
         feat_channels=256,
-        anchor_generator=dict(
-            type="AnchorGenerator",
-            scales=[8],
-            ratios=[0.5, 1.0, 2.0],
-            strides=[4, 8, 16, 32, 64],
-        ),
+        anchor_generator=dict(type="AnchorGenerator", scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64],),
         bbox_coder=dict(
-            type="DeltaXYWHBBoxCoder",
-            target_means=[0.0, 0.0, 0.0, 0.0],
-            target_stds=[1.0, 1.0, 1.0, 1.0],
+            type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[1.0, 1.0, 1.0, 1.0],
         ),
         loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
         loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=1.0),
@@ -51,14 +42,10 @@
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(
-                    type="DeltaXYWHBBoxCoder",
-                    target_means=[0.0, 0.0, 0.0, 0.0],
-                    target_stds=[0.1, 0.1, 0.2, 0.2],
+                    type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[0.1, 0.1, 0.2, 0.2],
                 ),
                 reg_class_agnostic=True,
-                loss_cls=dict(
-                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
-                ),
+                loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
                 loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
             ),
             dict(
@@ -68,14 +55,10 @@
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(
-                    type="DeltaXYWHBBoxCoder",
-                    target_means=[0.0, 0.0, 0.0, 0.0],
-                    target_stds=[0.05, 0.05, 0.1, 0.1],
+                    type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[0.05, 0.05, 0.1, 0.1],
                 ),
                 reg_class_agnostic=True,
-                loss_cls=dict(
-                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
-                ),
+                loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
                 loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
             ),
             dict(
@@ -90,9 +73,7 @@
                     target_stds=[0.033, 0.033, 0.067, 0.067],
                 ),
                 reg_class_agnostic=True,
-                loss_cls=dict(
-                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
-                ),
+                loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
                 loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
             ),
         ],
@@ -123,24 +104,13 @@
                 match_low_quality=True,
                 ignore_iof_thr=-1,
             ),
-            sampler=dict(
-                type="RandomSampler",
-                num=256,
-                pos_fraction=0.5,
-                neg_pos_ub=-1,
-                add_gt_as_proposals=False,
-            ),
+            sampler=dict(type="RandomSampler", num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False,),
             allowed_border=0,
             pos_weight=-1,
             debug=False,
         ),
         rpn_proposal=dict(
-            nms_across_levels=False,
-            nms_pre=2000,
-            nms_post=2000,
-            max_num=2000,
-            nms_thr=0.7,
-            min_bbox_size=0,
+            nms_across_levels=False, nms_pre=2000, nms_post=2000, max_num=2000, nms_thr=0.7, min_bbox_size=0,
         ),
         rcnn=[
             dict(
@@ -152,13 +122,7 @@
                     match_low_quality=False,
                     ignore_iof_thr=-1,
                 ),
-                sampler=dict(
-                    type="RandomSampler",
-                    num=512,
-                    pos_fraction=0.25,
-                    neg_pos_ub=-1,
-                    add_gt_as_proposals=True,
-                ),
+                sampler=dict(type="RandomSampler", num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True,),
                 mask_size=28,
                 pos_weight=-1,
                 debug=False,
@@ -172,13 +136,7 @@
                     match_low_quality=False,
                     ignore_iof_thr=-1,
                 ),
-                sampler=dict(
-                    type="RandomSampler",
-                    num=512,
-                    pos_fraction=0.25,
-                    neg_pos_ub=-1,
-                    add_gt_as_proposals=True,
-                ),
+                sampler=dict(type="RandomSampler", num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True,),
                 mask_size=28,
                 pos_weight=-1,
                 debug=False,
@@ -192,13 +150,7 @@
                     match_low_quality=False,
                     ignore_iof_thr=-1,
                 ),
-                sampler=dict(
-                    type="RandomSampler",
-                    num=512,
-                    pos_fraction=0.25,
-                    neg_pos_ub=-1,
-                    add_gt_as_proposals=True,
-                ),
+                sampler=dict(type="RandomSampler", num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True,),
                 mask_size=28,
                 pos_weight=-1,
                 debug=False,
@@ -206,18 +158,6 @@
     ],
 )
 test_cfg = dict(
-    rpn=dict(
-        nms_across_levels=False,
-        nms_pre=1000,
-        nms_post=1000,
-        max_num=1000,
-        nms_thr=0.7,
-        min_bbox_size=0,
-    ),
-    rcnn=dict(
-        score_thr=0.05,
-        nms=dict(type='nms', iou_threshold=0.5),
-        max_per_img=100,
-        mask_thr_binary=0.5,
-    ),
+    rpn=dict(nms_across_levels=False, nms_pre=1000, nms_post=1000, max_num=1000, nms_thr=0.7, min_bbox_size=0,),
+    rcnn=dict(score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5,),
 )
diff --git a/tests/data/models/mmdet_cascade_mask_rcnn/coco_instance.py b/tests/data/models/mmdet_cascade_mask_rcnn/coco_instance.py
index bdc0f2f30..1661d18cd 100644
--- a/tests/data/models/mmdet_cascade_mask_rcnn/coco_instance.py
+++ b/tests/data/models/mmdet_cascade_mask_rcnn/coco_instance.py
@@ -1,8 +1,6 @@
 dataset_type = "CocoDataset"
 data_root = "data/coco/"
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
-)
+img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 train_pipeline = [
     dict(type="LoadImageFromFile"),
     dict(type="LoadAnnotations", with_bbox=True, with_mask=True),
diff --git a/tests/data/models/mmdet_cascade_mask_rcnn/default_runtime.py b/tests/data/models/mmdet_cascade_mask_rcnn/default_runtime.py
index 594de8dcc..75ee67b9f 100644
--- a/tests/data/models/mmdet_cascade_mask_rcnn/default_runtime.py
+++ b/tests/data/models/mmdet_cascade_mask_rcnn/default_runtime.py
@@ -3,12 +3,13 @@
 log_config = dict(
     interval=50,
     hooks=[
-        dict(type='TextLoggerHook'),
+        dict(type="TextLoggerHook"),
         # dict(type='TensorboardLoggerHook')
-    ])
+    ],
+)
 # yapf:enable
-dist_params = dict(backend='nccl')
-log_level = 'INFO'
+dist_params = dict(backend="nccl")
+log_level = "INFO"
 load_from = None
 resume_from = None
-workflow = [('train', 1)]
+workflow = [("train", 1)]
diff --git a/tests/data/models/mmdet_cascade_mask_rcnn/schedule_1x.py b/tests/data/models/mmdet_cascade_mask_rcnn/schedule_1x.py
index 12694c87a..cc7fa00a8 100644
--- a/tests/data/models/mmdet_cascade_mask_rcnn/schedule_1x.py
+++ b/tests/data/models/mmdet_cascade_mask_rcnn/schedule_1x.py
@@ -1,11 +1,6 @@
 # optimizer
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+optimizer = dict(type="SGD", lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=None)
 # learning policy
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=500,
-    warmup_ratio=0.001,
-    step=[8, 11])
+lr_config = dict(policy="step", warmup="linear", warmup_iters=500, warmup_ratio=0.001, step=[8, 11])
 total_epochs = 12
diff --git a/tests/data/models/mmdet_retinanet/coco_detection.py b/tests/data/models/mmdet_retinanet/coco_detection.py
index 7b3f33bd9..67dbc3efb 100644
--- a/tests/data/models/mmdet_retinanet/coco_detection.py
+++ b/tests/data/models/mmdet_retinanet/coco_detection.py
@@ -1,8 +1,6 @@
 dataset_type = "CocoDataset"
 data_root = "data/coco/"
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
-)
+img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 train_pipeline = [
     dict(type="LoadImageFromFile"),
     dict(type="LoadAnnotations", with_bbox=True),
diff --git a/tests/data/models/mmdet_retinanet/default_runtime.py b/tests/data/models/mmdet_retinanet/default_runtime.py
index 594de8dcc..75ee67b9f 100644
--- a/tests/data/models/mmdet_retinanet/default_runtime.py
+++ b/tests/data/models/mmdet_retinanet/default_runtime.py
@@ -3,12 +3,13 @@
 log_config = dict(
     interval=50,
     hooks=[
-        dict(type='TextLoggerHook'),
+        dict(type="TextLoggerHook"),
         # dict(type='TensorboardLoggerHook')
-    ])
+    ],
+)
 # yapf:enable
-dist_params = dict(backend='nccl')
-log_level = 'INFO'
+dist_params = dict(backend="nccl")
+log_level = "INFO"
 load_from = None
 resume_from = None
-workflow = [('train', 1)]
+workflow = [("train", 1)]
diff --git a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn.py b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn.py
index 47fe98c2e..4f4f60db2 100644
--- a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn.py
+++ b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn.py
@@ -1,60 +1,50 @@
 # model settings
 model = dict(
-    type='RetinaNet',
-    pretrained='torchvision://resnet50',
+    type="RetinaNet",
+    pretrained="torchvision://resnet50",
     backbone=dict(
-        type='ResNet',
+        type="ResNet",
         depth=50,
         num_stages=4,
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
+        norm_cfg=dict(type="BN", requires_grad=True),
         norm_eval=True,
-        style='pytorch'),
+        style="pytorch",
+    ),
     neck=dict(
-        type='FPN',
+        type="FPN",
         in_channels=[256, 512, 1024, 2048],
         out_channels=256,
         start_level=1,
-        add_extra_convs='on_input',
-        num_outs=5),
+        add_extra_convs="on_input",
+        num_outs=5,
+    ),
     bbox_head=dict(
-        type='RetinaHead',
+        type="RetinaHead",
         num_classes=80,
         in_channels=256,
         stacked_convs=4,
         feat_channels=256,
         anchor_generator=dict(
-            type='AnchorGenerator',
+            type="AnchorGenerator",
             octave_base_scale=4,
             scales_per_octave=3,
             ratios=[0.5, 1.0, 2.0],
-            strides=[8, 16, 32, 64, 128]),
-        bbox_coder=dict(
-            type='DeltaXYWHBBoxCoder',
-            target_means=[.0, .0, .0, .0],
-            target_stds=[1.0, 1.0, 1.0, 1.0]),
-        loss_cls=dict(
-            type='FocalLoss',
-            use_sigmoid=True,
-            gamma=2.0,
-            alpha=0.25,
-            loss_weight=1.0),
-        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+            strides=[8, 16, 32, 64, 128],
+        ),
+        bbox_coder=dict(type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[1.0, 1.0, 1.0, 1.0]),
+        loss_cls=dict(type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0),
+        loss_bbox=dict(type="L1Loss", loss_weight=1.0),
+    ),
     # training and testing settings
diff --git a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn.py b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn.py
index 47fe98c2e..4f4f60db2 100644
--- a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn.py
+++ b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn.py
@@ -1,60 +1,50 @@
 # model settings
 model = dict(
-    type='RetinaNet',
-    pretrained='torchvision://resnet50',
+    type="RetinaNet",
+    pretrained="torchvision://resnet50",
     backbone=dict(
-        type='ResNet',
+        type="ResNet",
         depth=50,
         num_stages=4,
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
+        norm_cfg=dict(type="BN", requires_grad=True),
         norm_eval=True,
-        style='pytorch'),
+        style="pytorch",
+    ),
     neck=dict(
-        type='FPN',
+        type="FPN",
         in_channels=[256, 512, 1024, 2048],
         out_channels=256,
         start_level=1,
-        add_extra_convs='on_input',
-        num_outs=5),
+        add_extra_convs="on_input",
+        num_outs=5,
+    ),
     bbox_head=dict(
-        type='RetinaHead',
+        type="RetinaHead",
         num_classes=80,
         in_channels=256,
         stacked_convs=4,
         feat_channels=256,
         anchor_generator=dict(
-            type='AnchorGenerator',
+            type="AnchorGenerator",
             octave_base_scale=4,
             scales_per_octave=3,
             ratios=[0.5, 1.0, 2.0],
-            strides=[8, 16, 32, 64, 128]),
-        bbox_coder=dict(
-            type='DeltaXYWHBBoxCoder',
-            target_means=[.0, .0, .0, .0],
-            target_stds=[1.0, 1.0, 1.0, 1.0]),
-        loss_cls=dict(
-            type='FocalLoss',
-            use_sigmoid=True,
-            gamma=2.0,
-            alpha=0.25,
-            loss_weight=1.0),
-        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+            strides=[8, 16, 32, 64, 128],
+        ),
+        bbox_coder=dict(type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[1.0, 1.0, 1.0, 1.0]),
+        loss_cls=dict(type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0),
+        loss_bbox=dict(type="L1Loss", loss_weight=1.0),
+    ),
     # training and testing settings
     train_cfg=dict(
-        assigner=dict(
-            type='MaxIoUAssigner',
-            pos_iou_thr=0.5,
-            neg_iou_thr=0.4,
-            min_pos_iou=0,
-            ignore_iof_thr=-1),
+        assigner=dict(type="MaxIoUAssigner", pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0, ignore_iof_thr=-1),
         allowed_border=-1,
         pos_weight=-1,
-        debug=False),
+        debug=False,
+    ),
     test_cfg=dict(
-        nms_pre=1000,
-        min_bbox_size=0,
-        score_thr=0.05,
-        nms=dict(type='nms', iou_threshold=0.5),
-        max_per_img=100))
+        nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100
+    ),
+)
diff --git a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco.py b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco.py
index ef47d313b..75ffb614b 100644
--- a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco.py
+++ b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco.py
@@ -1,7 +1,3 @@
-_base_ = [
-    'retinanet_r50_fpn.py',
-    'coco_detection.py',
-    'schedule_1x.py', 'default_runtime.py'
-]
+_base_ = ["retinanet_r50_fpn.py", "coco_detection.py", "schedule_1x.py", "default_runtime.py"]
 # optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
+optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=0.0001)
diff --git a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco_v280.py b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco_v280.py
index e6eb39229..fdf669a8f 100644
--- a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco_v280.py
+++ b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco_v280.py
@@ -1,7 +1,3 @@
-_base_ = [
-    'retinanet_r50_fpn_v280.py',
-    'coco_detection.py',
-    'schedule_1x.py', 'default_runtime.py'
-]
+_base_ = ["retinanet_r50_fpn_v280.py", "coco_detection.py", "schedule_1x.py", "default_runtime.py"]
 # optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
+optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=0.0001)
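The two *_1x_coco.py files above only flatten the `_base_` list; how the fragments compose is unchanged. A usage sketch, assuming the mmcv config loader of this mmdet era (paths relative to the repo root):

from mmcv import Config

# Loading the top-level file merges the model, data, schedule and runtime
# fragments listed in `_base_`, then applies the overrides written below them.
cfg = Config.fromfile("tests/data/models/mmdet_retinanet/retinanet_r50_fpn_1x_coco.py")
print(cfg.model.type)    # "RetinaNet", contributed by retinanet_r50_fpn.py
print(cfg.optimizer.lr)  # 0.01, the local override winning over schedule_1x.py's 0.02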
diff --git a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_v280.py b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_v280.py
index a08b14f60..cefd366d0 100644
--- a/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_v280.py
+++ b/tests/data/models/mmdet_retinanet/retinanet_r50_fpn_v280.py
@@ -1,60 +1,48 @@
 # model settings
 model = dict(
-    type='RetinaNet',
-    pretrained='torchvision://resnet50',
+    type="RetinaNet",
+    pretrained="torchvision://resnet50",
     backbone=dict(
-        type='ResNet',
+        type="ResNet",
         depth=50,
         num_stages=4,
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
+        norm_cfg=dict(type="BN", requires_grad=True),
         norm_eval=True,
-        style='pytorch'),
+        style="pytorch",
+    ),
     neck=dict(
-        type='FPN',
+        type="FPN",
         in_channels=[256, 512, 1024, 2048],
         out_channels=256,
         start_level=1,
-        add_extra_convs='on_input',
-        num_outs=5),
+        add_extra_convs="on_input",
+        num_outs=5,
+    ),
     bbox_head=dict(
-        type='RetinaHead',
+        type="RetinaHead",
         num_classes=80,
         in_channels=256,
         stacked_convs=4,
         feat_channels=256,
         anchor_generator=dict(
-            type='AnchorGenerator',
+            type="AnchorGenerator",
             octave_base_scale=4,
             scales_per_octave=3,
             ratios=[0.5, 1.0, 2.0],
-            strides=[8, 16, 32, 64, 128]),
-        bbox_coder=dict(
-            type='DeltaXYWHBBoxCoder',
-            target_means=[.0, .0, .0, .0],
-            target_stds=[1.0, 1.0, 1.0, 1.0]),
-        loss_cls=dict(
-            type='FocalLoss',
-            use_sigmoid=True,
-            gamma=2.0,
-            alpha=0.25,
-            loss_weight=1.0),
-        loss_bbox=dict(type='L1Loss', loss_weight=1.0)))
+            strides=[8, 16, 32, 64, 128],
+        ),
+        bbox_coder=dict(type="DeltaXYWHBBoxCoder", target_means=[0.0, 0.0, 0.0, 0.0], target_stds=[1.0, 1.0, 1.0, 1.0]),
+        loss_cls=dict(type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0),
+        loss_bbox=dict(type="L1Loss", loss_weight=1.0),
+    ),
+)
 # training and testing settings
 train_cfg = dict(
-    assigner=dict(
-        type='MaxIoUAssigner',
-        pos_iou_thr=0.5,
-        neg_iou_thr=0.4,
-        min_pos_iou=0,
-        ignore_iof_thr=-1),
+    assigner=dict(type="MaxIoUAssigner", pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0, ignore_iof_thr=-1),
     allowed_border=-1,
     pos_weight=-1,
-    debug=False)
-test_cfg = dict(
-    nms_pre=1000,
-    min_bbox_size=0,
-    score_thr=0.05,
-    nms=dict(type='nms', iou_threshold=0.5),
-    max_per_img=100)
+    debug=False,
+)
+test_cfg = dict(nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100)
diff --git a/tests/data/models/mmdet_retinanet/schedule_1x.py b/tests/data/models/mmdet_retinanet/schedule_1x.py
index 12694c87a..cc7fa00a8 100644
--- a/tests/data/models/mmdet_retinanet/schedule_1x.py
+++ b/tests/data/models/mmdet_retinanet/schedule_1x.py
@@ -1,11 +1,6 @@
 # optimizer
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+optimizer = dict(type="SGD", lr=0.02, momentum=0.9, weight_decay=0.0001)
 optimizer_config = dict(grad_clip=None)
 # learning policy
-lr_config = dict(
-    policy='step',
-    warmup='linear',
-    warmup_iters=500,
-    warmup_ratio=0.001,
-    step=[8, 11])
+lr_config = dict(policy="step", warmup="linear", warmup_iters=500, warmup_ratio=0.001, step=[8, 11])
 total_epochs = 12
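One thing worth flagging while both retinanet configs are in view: retinanet_r50_fpn.py nests train_cfg/test_cfg inside model, while the *_v280.py variant above keeps them at module level for older mmdet releases. A minimal sketch of code that tolerates both layouts (the helper name is hypothetical, not part of sahi):

def resolve_test_cfg(cfg: dict):
    # Prefer the nested location used by the newer config layout, then fall
    # back to the legacy module-level key kept by the *_v280.py variant.
    nested = cfg.get("model", {}).get("test_cfg")
    return nested if nested is not None else cfg.get("test_cfg")

legacy = {"model": {"type": "RetinaNet"}, "test_cfg": {"nms_pre": 1000}}
nested = {"model": {"type": "RetinaNet", "test_cfg": {"nms_pre": 1000}}}
assert resolve_test_cfg(legacy) == resolve_test_cfg(nested) == {"nms_pre": 1000}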
diff --git a/tests/test_annotation.py b/tests/test_annotation.py
index f95101712..112fb8590 100644
--- a/tests/test_annotation.py
+++ b/tests/test_annotation.py
@@ -38,9 +38,7 @@ def test_mask(self):
         full_shape_height, full_shape_width = 500, 600
         full_shape = [full_shape_height, full_shape_width]
 
-        mask = Mask.from_coco_segmentation(
-            segmentation=coco_segmentation, full_shape=full_shape
-        )
+        mask = Mask.from_coco_segmentation(segmentation=coco_segmentation, full_shape=full_shape)
 
         self.assertEqual(mask.full_shape_height, full_shape_height)
         self.assertEqual(mask.full_shape_width, full_shape_width)
diff --git a/tests/test_cocoutils.py b/tests/test_cocoutils.py
index 81f9c4f19..740e6e589 100644
--- a/tests/test_cocoutils.py
+++ b/tests/test_cocoutils.py
@@ -16,16 +16,10 @@ def test_coco_categories(self):
         category_id = 0
         category_name = "human"
         supercategory = "human"
-        coco_category1 = CocoCategory(
-            id=category_id, name=category_name, supercategory=supercategory
-        )
+        coco_category1 = CocoCategory(id=category_id, name=category_name, supercategory=supercategory)
         coco_category2 = CocoCategory(id=category_id, name=category_name)
         coco_category3 = CocoCategory.from_coco_category(
-            {
-                "id": category_id,
-                "name": category_name,
-                "supercategory": supercategory,
-            }
+            {"id": category_id, "name": category_name, "supercategory": supercategory,}
         )
 
         self.assertEqual(coco_category1.id, category_id)
@@ -51,9 +45,7 @@ def test_coco_annotation(self):
         category_id = 3
         category_name = "car"
         coco_annotation = CocoAnnotation.from_coco_segmentation(
-            segmentation=coco_segmentation,
-            category_id=category_id,
-            category_name=category_name,
+            segmentation=coco_segmentation, category_id=category_id, category_name=category_name,
         )
 
         self.assertAlmostEqual(coco_annotation.area, 41177, 1)
@@ -65,9 +57,7 @@ def test_coco_annotation(self):
         coco_bbox = [1, 1, 100, 100]
         category_id = 3
         coco_annotation = CocoAnnotation.from_coco_bbox(
-            bbox=coco_bbox,
-            category_id=category_id,
-            category_name=category_name,
+            bbox=coco_bbox, category_id=category_id, category_name=category_name,
         )
 
         self.assertEqual(coco_annotation.area, 10000)
@@ -115,9 +105,7 @@ def test_coco_image(self):
         category_id = 3
         category_name = "car"
         coco_annotation_1 = CocoAnnotation.from_coco_segmentation(
-            segmentation=coco_segmentation,
-            category_id=category_id,
-            category_name=category_name,
+            segmentation=coco_segmentation, category_id=category_id, category_name=category_name,
         )
         coco_image.add_annotation(coco_annotation_1)
@@ -271,32 +259,25 @@ def test_coco(self):
         self.assertEqual(coco1.images[2].annotations[1].category_name, "human")
         self.assertEqual(coco2.images[2].annotations[1].category_name, "human")
         self.assertEqual(
-            coco1.images[1].annotations[1].segmentation,
-            [[501, 451, 622, 451, 622, 543, 501, 543]],
+            coco1.images[1].annotations[1].segmentation, [[501, 451, 622, 451, 622, 543, 501, 543]],
         )
         self.assertEqual(
-            coco2.images[1].annotations[1].segmentation,
-            [[501, 451, 622, 451, 622, 543, 501, 543]],
+            coco2.images[1].annotations[1].segmentation, [[501, 451, 622, 451, 622, 543, 501, 543]],
         )
         self.assertEqual(
-            coco1.category_mapping,
-            category_mapping,
+            coco1.category_mapping, category_mapping,
         )
         self.assertEqual(
-            coco2.category_mapping,
-            category_mapping,
+            coco2.category_mapping, category_mapping,
         )
         self.assertEqual(
-            coco1.stats,
-            coco2.stats,
+            coco1.stats, coco2.stats,
         )
         self.assertEqual(
-            coco1.stats["num_images"],
-            len(coco1.images),
+            coco1.stats["num_images"], len(coco1.images),
         )
         self.assertEqual(
-            coco1.stats["num_annotations"],
-            len(coco1.json["annotations"]),
+            coco1.stats["num_annotations"], len(coco1.json["annotations"]),
         )
 
     def test_split_coco_as_train_val(self):
@@ -310,24 +291,18 @@ def test_split_coco_as_train_val(self):
         self.assertEqual(len(result["train_coco"].json["annotations"]), 5)
         self.assertEqual(result["train_coco"].json["images"][0]["height"], 682)
         self.assertEqual(result["train_coco"].image_dir, image_dir)
+        self.assertEqual(result["train_coco"].stats["num_images"], len(result["train_coco"].images))
         self.assertEqual(
-            result["train_coco"].stats["num_images"], len(result["train_coco"].images)
-        )
-        self.assertEqual(
-            result["train_coco"].stats["num_annotations"],
-            len(result["train_coco"].json["annotations"]),
+            result["train_coco"].stats["num_annotations"], len(result["train_coco"].json["annotations"]),
        )
 
         self.assertEqual(len(result["val_coco"].json["images"]), 1)
         self.assertEqual(len(result["val_coco"].json["annotations"]), 7)
         self.assertEqual(result["val_coco"].json["images"][0]["height"], 1365)
         self.assertEqual(result["val_coco"].image_dir, image_dir)
+        self.assertEqual(result["val_coco"].stats["num_images"], len(result["val_coco"].images))
         self.assertEqual(
-            result["val_coco"].stats["num_images"], len(result["val_coco"].images)
-        )
-        self.assertEqual(
-            result["val_coco"].stats["num_annotations"],
-            len(result["val_coco"].json["annotations"]),
+            result["val_coco"].stats["num_annotations"], len(result["val_coco"].json["annotations"]),
         )
 
     def test_coco2yolo(self):
@@ -349,16 +324,13 @@ def test_update_categories(self):
         self.assertEqual(len(source_coco_dict["images"]), 1)
         self.assertEqual(len(source_coco_dict["categories"]), 1)
         self.assertEqual(
-            source_coco_dict["categories"],
-            [{"id": 1, "name": "car", "supercategory": "car"}],
+            source_coco_dict["categories"], [{"id": 1, "name": "car", "supercategory": "car"}],
         )
         self.assertEqual(source_coco_dict["annotations"][1]["category_id"], 1)
 
         # update categories
         desired_name2id = {"human": 1, "car": 2, "big_vehicle": 3}
-        target_coco_dict = update_categories(
-            desired_name2id=desired_name2id, coco_dict=source_coco_dict
-        )
+        target_coco_dict = update_categories(desired_name2id=desired_name2id, coco_dict=source_coco_dict)
 
         self.assertEqual(len(target_coco_dict["annotations"]), 5)
         self.assertEqual(len(target_coco_dict["images"]), 1)
@@ -384,8 +356,7 @@ def test_coco_update_categories(self):
         self.assertEqual(len(coco.json["images"]), 1)
         self.assertEqual(len(coco.json["categories"]), 1)
         self.assertEqual(
-            coco.json["categories"],
-            [{"id": 1, "name": "car", "supercategory": "car"}],
+            coco.json["categories"], [{"id": 1, "name": "car", "supercategory": "car"}],
         )
         self.assertEqual(coco.json["annotations"][1]["category_id"], 1)
         self.assertEqual(coco.image_dir, image_dir)
@@ -422,10 +393,7 @@ def test_get_imageid2annotationlist_mapping(self):
 
         def check_image_id(image_id):
-            image_ids = [
-                annotationlist["image_id"]
-                for annotationlist in imageid2annotationlist_mapping[image_id]
-            ]
+            image_ids = [annotationlist["image_id"] for annotationlist in imageid2annotationlist_mapping[image_id]]
             self.assertEqual(image_ids, [image_id] * len(image_ids))
 
         check_image_id(image_id=1)
@@ -492,24 +460,19 @@ def test_merge_from_list(self):
         self.assertEqual(len(merged_coco_dict["annotations"]), 22)
         self.assertEqual(len(merged_coco_dict["categories"]), 2)
         self.assertEqual(
-            merged_coco_dict["annotations"][12]["bbox"],
-            coco_dict3["annotations"][0]["bbox"],
+            merged_coco_dict["annotations"][12]["bbox"], coco_dict3["annotations"][0]["bbox"],
         )
         self.assertEqual(
-            merged_coco_dict["annotations"][12]["id"],
-            13,
+            merged_coco_dict["annotations"][12]["id"], 13,
         )
         self.assertEqual(
-            merged_coco_dict["annotations"][12]["image_id"],
-            3,
+            merged_coco_dict["annotations"][12]["image_id"], 3,
         )
         self.assertEqual(
-            merged_coco_dict["annotations"][9]["category_id"],
-            1,
+            merged_coco_dict["annotations"][9]["category_id"], 1,
         )
         self.assertEqual(
-            merged_coco_dict["annotations"][9]["image_id"],
-            2,
+            merged_coco_dict["annotations"][9]["image_id"], 2,
         )
 
     def test_coco_merge(self):
@@ -531,28 +494,22 @@ def test_coco_merge(self):
         self.assertEqual(len(coco1.images), 3)
         self.assertEqual(
-            coco1.json["annotations"][12]["id"],
-            13,
+            coco1.json["annotations"][12]["id"], 13,
         )
         self.assertEqual(
-            coco1.json["annotations"][12]["image_id"],
-            3,
+            coco1.json["annotations"][12]["image_id"], 3,
         )
         self.assertEqual(
-            coco1.json["annotations"][9]["category_id"],
-            1,
+            coco1.json["annotations"][9]["category_id"], 1,
         )
         self.assertEqual(
-            coco1.json["annotations"][9]["image_id"],
-            2,
+            coco1.json["annotations"][9]["image_id"], 2,
         )
         self.assertEqual(
-            coco1.image_dir,
-            image_dir,
+            coco1.image_dir, image_dir,
         )
         self.assertEqual(
-            coco2.image_dir,
-            image_dir,
+            coco2.image_dir, image_dir,
         )
         self.assertEqual(coco2.stats["num_images"], len(coco2.images))
         self.assertEqual(coco2.stats["num_annotations"], len(coco2.json["annotations"]))
@@ -566,35 +523,26 @@ def test_get_subsampled_coco(self):
         coco = Coco.from_coco_dict_or_path(coco_path, image_dir=image_dir)
         subsampled_coco = coco.get_subsampled_coco(subsample_ratio=5)
         self.assertEqual(
-            len(coco.json["images"]),
-            50,
+            len(coco.json["images"]), 50,
         )
         self.assertEqual(
-            len(subsampled_coco.json["images"]),
-            10,
+            len(subsampled_coco.json["images"]), 10,
         )
         self.assertEqual(
-            len(coco.images[5].annotations),
-            len(subsampled_coco.images[1].annotations),
+            len(coco.images[5].annotations), len(subsampled_coco.images[1].annotations),
         )
         self.assertEqual(
-            len(coco.images[5].annotations),
-            len(subsampled_coco.images[1].annotations),
+            len(coco.images[5].annotations), len(subsampled_coco.images[1].annotations),
         )
         self.assertEqual(
-            coco.image_dir,
-            image_dir,
+            coco.image_dir, image_dir,
         )
         self.assertEqual(
-            subsampled_coco.image_dir,
-            image_dir,
+            subsampled_coco.image_dir, image_dir,
         )
+        self.assertEqual(subsampled_coco.stats["num_images"], len(subsampled_coco.images))
         self.assertEqual(
-            subsampled_coco.stats["num_images"], len(subsampled_coco.images)
-        )
-        self.assertEqual(
-            subsampled_coco.stats["num_annotations"],
-            len(subsampled_coco.json["annotations"]),
+            subsampled_coco.stats["num_annotations"], len(subsampled_coco.json["annotations"]),
         )
 
     def test_get_area_filtered_coco(self):
@@ -607,115 +555,79 @@ def test_get_area_filtered_coco(self):
         coco = Coco.from_coco_dict_or_path(coco_path, image_dir=image_dir)
         area_filtered_coco = coco.get_area_filtered_coco(min=min_area, max=max_area)
         self.assertEqual(
-            len(coco.json["images"]),
-            50,
+            len(coco.json["images"]), 50,
         )
         self.assertEqual(
-            len(area_filtered_coco.json["images"]),
-            15,
+            len(area_filtered_coco.json["images"]), 15,
         )
         self.assertGreater(
-            area_filtered_coco.stats["min_annotation_area"],
-            min_area,
+            area_filtered_coco.stats["min_annotation_area"], min_area,
         )
         self.assertLess(
-            area_filtered_coco.stats["max_annotation_area"],
-            max_area,
-        )
-        self.assertEqual(
-            area_filtered_coco.image_dir,
-            image_dir,
+            area_filtered_coco.stats["max_annotation_area"], max_area,
         )
         self.assertEqual(
-            area_filtered_coco.stats["num_images"], len(area_filtered_coco.images)
+            area_filtered_coco.image_dir, image_dir,
         )
+        self.assertEqual(area_filtered_coco.stats["num_images"], len(area_filtered_coco.images))
         self.assertEqual(
-            area_filtered_coco.stats["num_annotations"],
-            len(area_filtered_coco.json["annotations"]),
+            area_filtered_coco.stats["num_annotations"], len(area_filtered_coco.json["annotations"]),
         )
 
         intervals_per_category = {
             "human": {"min": 20, "max": 10000},
             "vehicle": {"min": 50, "max": 15000},
         }
-        area_filtered_coco = coco.get_area_filtered_coco(
-            intervals_per_category=intervals_per_category
-        )
+        area_filtered_coco = coco.get_area_filtered_coco(intervals_per_category=intervals_per_category)
         self.assertEqual(
-            len(coco.json["images"]),
-            50,
+            len(coco.json["images"]), 50,
         )
         self.assertEqual(
-            len(area_filtered_coco.json["images"]),
-            22,
+            len(area_filtered_coco.json["images"]), 22,
         )
         self.assertGreater(
             area_filtered_coco.stats["min_annotation_area"],
-            min(
-                intervals_per_category["human"]["min"],
-                intervals_per_category["vehicle"]["min"],
-            ),
+            min(intervals_per_category["human"]["min"], intervals_per_category["vehicle"]["min"],),
         )
         self.assertLess(
             area_filtered_coco.stats["max_annotation_area"],
-            max(
-                intervals_per_category["human"]["max"],
-                intervals_per_category["vehicle"]["max"],
-            ),
-        )
-        self.assertEqual(
-            area_filtered_coco.image_dir,
-            image_dir,
+            max(intervals_per_category["human"]["max"], intervals_per_category["vehicle"]["max"],),
         )
         self.assertEqual(
-            area_filtered_coco.stats["num_images"], len(area_filtered_coco.images)
+            area_filtered_coco.image_dir, image_dir,
         )
+        self.assertEqual(area_filtered_coco.stats["num_images"], len(area_filtered_coco.images))
         self.assertEqual(
-            area_filtered_coco.stats["num_annotations"],
-            len(area_filtered_coco.json["annotations"]),
+            area_filtered_coco.stats["num_annotations"], len(area_filtered_coco.json["annotations"]),
         )
 
         intervals_per_category = {
             "human": {"min": 20, "max": 10000},
             "vehicle": {"min": 50, "max": 15000},
         }
-        area_filtered_coco = coco.get_area_filtered_coco(
-            intervals_per_category=intervals_per_category
-        )
+        area_filtered_coco = coco.get_area_filtered_coco(intervals_per_category=intervals_per_category)
         self.assertEqual(
-            len(coco.json["images"]),
-            50,
+            len(coco.json["images"]), 50,
        )
         self.assertEqual(
-            len(area_filtered_coco.json["images"]),
-            22,
+            len(area_filtered_coco.json["images"]), 22,
         )
         self.assertGreater(
             area_filtered_coco.stats["min_annotation_area"],
-            min(
-                intervals_per_category["human"]["min"],
-                intervals_per_category["vehicle"]["min"],
-            ),
+            min(intervals_per_category["human"]["min"], intervals_per_category["vehicle"]["min"],),
         )
         self.assertLess(
             area_filtered_coco.stats["max_annotation_area"],
-            max(
-                intervals_per_category["human"]["max"],
-                intervals_per_category["vehicle"]["max"],
-            ),
-        )
-        self.assertEqual(
-            area_filtered_coco.image_dir,
-            image_dir,
+            max(intervals_per_category["human"]["max"], intervals_per_category["vehicle"]["max"],),
         )
         self.assertEqual(
-            area_filtered_coco.stats["num_images"], len(area_filtered_coco.images)
+            area_filtered_coco.image_dir, image_dir,
         )
+        self.assertEqual(area_filtered_coco.stats["num_images"], len(area_filtered_coco.images))
         self.assertEqual(
-            area_filtered_coco.stats["num_annotations"],
-            len(area_filtered_coco.json["annotations"]),
+            area_filtered_coco.stats["num_annotations"], len(area_filtered_coco.json["annotations"]),
         )
 
     def test_cocovid(self):
diff --git a/tests/test_filter.py b/tests/test_filter.py
index 3158e49f4..895301a8f 100644
--- a/tests/test_filter.py
+++ b/tests/test_filter.py
@@ -65,9 +65,7 @@ def _perturb(box: BoundingBox):
     return BoundingBox(box=[minx, miny, maxx, maxy], shift_amount=box.shift_amount)
 
 
-def perturb_boxes(
-    preds: List[ObjectPrediction],
-) -> List[ObjectPrediction]:
+def perturb_boxes(preds: List[ObjectPrediction],) -> List[ObjectPrediction]:
     preds = deepcopy(preds)
     for i in range(len(preds)):
         if i % 2 == 0:
diff --git a/tests/test_mmdetectionmodel.py b/tests/test_mmdetectionmodel.py
index b780fa406..d3e5287f9 100644
--- a/tests/test_mmdetectionmodel.py
+++ b/tests/test_mmdetectionmodel.py
@@ -135,20 +135,17 @@ def test_convert_original_predictions_with_mask_output(self):
         self.assertEqual(object_prediction_list[0].category.id, 0)
         self.assertEqual(object_prediction_list[0].category.name, "person")
         self.assertEqual(
-            object_prediction_list[0].bbox.to_coco_bbox(),
-            [337, 124, 8, 14],
+            object_prediction_list[0].bbox.to_coco_bbox(), [337, 124, 8, 14],
         )
         self.assertEqual(object_prediction_list[1].category.id, 2)
         self.assertEqual(object_prediction_list[1].category.name, "car")
         self.assertEqual(
-            object_prediction_list[1].bbox.to_coco_bbox(),
-            [657, 204, 13, 10],
+            object_prediction_list[1].bbox.to_coco_bbox(), [657, 204, 13, 10],
         )
         self.assertEqual(object_prediction_list[5].category.id, 2)
         self.assertEqual(object_prediction_list[5].category.name, "car")
         self.assertEqual(
-            object_prediction_list[2].bbox.to_coco_bbox(),
-            [760, 232, 20, 15],
+            object_prediction_list[2].bbox.to_coco_bbox(), [760, 232, 20, 15],
         )
 
     def test_convert_original_predictions_without_mask_output(self):
@@ -182,19 +179,15 @@
         self.assertEqual(object_prediction_list[0].category.id, 2)
         self.assertEqual(object_prediction_list[0].category.name, "car")
         self.assertEqual(
-            object_prediction_list[0].bbox.to_coco_bbox(),
-            [448, 309, 47, 32],
+            object_prediction_list[0].bbox.to_coco_bbox(), [448, 309, 47, 32],
         )
         self.assertEqual(object_prediction_list[5].category.id, 2)
         self.assertEqual(object_prediction_list[5].category.name, "car")
         self.assertEqual(
-            object_prediction_list[5].bbox.to_coco_bbox(),
-            [523, 225, 22, 17],
+            object_prediction_list[5].bbox.to_coco_bbox(), [523, 225, 22, 17],
         )
 
-    def test_create_original_predictions_from_object_prediction_list_with_mask_output(
-        self,
-    ):
+    def test_create_original_predictions_from_object_prediction_list_with_mask_output(self,):
         from sahi.model import MmdetDetectionModel
 
         # init model
@@ -237,9 +230,7 @@ def test_create_original_predictions_from_object_prediction_list_with_mask_outpu
         self.assertEqual(len(original_predictions_1[0][1]), len(original_predictions_1[0][1]))  # 0
         self.assertEqual(original_predictions_1[0][1].shape, original_predictions_1[0][1].shape)  # (0, 5)
 
-    def test_create_original_predictions_from_object_prediction_list_without_mask_output(
-        self,
-    ):
+    def test_create_original_predictions_from_object_prediction_list_without_mask_output(self,):
         from sahi.model import MmdetDetectionModel
 
         # init model
diff --git a/tests/test_predict.py b/tests/test_predict.py
index caa1ac26d..bc675fff0 100644
--- a/tests/test_predict.py
+++ b/tests/test_predict.py
@@ -47,10 +47,7 @@ def test_get_prediction_mmdet(self):
 
         # get full sized prediction
         prediction_result = get_prediction(
-            image=image,
-            detection_model=mmdet_detection_model,
-            shift_amount=[0, 0],
-            full_shape=None,
+            image=image, detection_model=mmdet_detection_model, shift_amount=[0, 0], full_shape=None,
         )
         object_prediction_list = prediction_result.object_prediction_list
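The test_shapelyutils.py hunks below assert box geometry that can be checked independently of sahi; judging by the asserted bounds, get_shapely_box(x, y, width, height) appears to build the polygon spanning (x, y) to (x + width, y + height). A standalone sketch using shapely's stock box constructor:

from shapely.geometry import box

# Same numbers as test_get_shapely_box: a 256x256 box anchored at (1, 1).
x, y, width, height = 1, 1, 256, 256
shapely_box = box(x, y, x + width, y + height)
print(shapely_box.area)    # 65536.0
print(shapely_box.bounds)  # (1.0, 1.0, 257.0, 257.0)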
diff --git a/tests/test_shapelyutils.py b/tests/test_shapelyutils.py
index 1ae96a7e2..f05654f8c 100644
--- a/tests/test_shapelyutils.py
+++ b/tests/test_shapelyutils.py
@@ -16,9 +16,7 @@ def test_get_shapely_box(self):
         x, y, width, height = 1, 1, 256, 256
         shapely_box = get_shapely_box(x, y, width, height)
 
-        self.assertListEqual(
-            shapely_box.exterior.coords.xy[0].tolist(), [257.0, 257.0, 1.0, 1.0, 257.0]
-        )
+        self.assertListEqual(shapely_box.exterior.coords.xy[0].tolist(), [257.0, 257.0, 1.0, 1.0, 257.0])
         self.assertEqual(shapely_box.area, 65536)
         self.assertTupleEqual(shapely_box.bounds, (1, 1, 257, 257))
@@ -27,8 +25,7 @@ def test_get_shapely_multipolygon(self):
         shapely_multipolygon = get_shapely_multipolygon(coco_segmentation)
 
         self.assertListEqual(
-            shapely_multipolygon[0].exterior.coords.xy[0].tolist(),
-            [1.0, 325, 250, 5, 1],
+            shapely_multipolygon[0].exterior.coords.xy[0].tolist(), [1.0, 325, 250, 5, 1],
         )
         self.assertEqual(shapely_multipolygon.area, 41177.5)
         self.assertTupleEqual(shapely_multipolygon.bounds, (1, 1, 325, 200))
@@ -42,88 +39,58 @@ def test_shapely_annotation(self):
         # test conversion methods
         coco_segmentation = shapely_annotation.to_coco_segmentation()
         self.assertEqual(
-            coco_segmentation,
-            [[1, 1, 325, 125, 250, 200, 5, 200]],
+            coco_segmentation, [[1, 1, 325, 125, 250, 200, 5, 200]],
         )
         opencv_contours = shapely_annotation.to_opencv_contours()
         self.assertEqual(
-            opencv_contours,
-            [
-                [
-                    [[1, 1]],
-                    [[325, 125]],
-                    [[250, 200]],
-                    [[5, 200]],
-                    [[1, 1]],
-                ]
-            ],
+            opencv_contours, [[[[1, 1]], [[325, 125]], [[250, 200]], [[5, 200]], [[1, 1]],]],
         )
         coco_bbox = shapely_annotation.to_coco_bbox()
         self.assertEqual(
-            coco_bbox,
-            [1, 1, 324, 199],
+            coco_bbox, [1, 1, 324, 199],
         )
         voc_bbox = shapely_annotation.to_voc_bbox()
         self.assertEqual(
-            voc_bbox,
-            [1, 1, 325, 200],
+            voc_bbox, [1, 1, 325, 200],
         )
 
         # test properties
         self.assertEqual(
-            shapely_annotation.area,
-            int(shapely_multipolygon.area),
+            shapely_annotation.area, int(shapely_multipolygon.area),
         )
         self.assertEqual(
-            shapely_annotation.multipolygon,
-            shapely_multipolygon,
+            shapely_annotation.multipolygon, shapely_multipolygon,
         )
 
         # init shapely_annotation from coco bbox
         coco_bbox = [1, 1, 100, 100]
-        shapely_polygon = get_shapely_box(
-            x=coco_bbox[0], y=coco_bbox[1], width=coco_bbox[2], height=coco_bbox[3]
-        )
+        shapely_polygon = get_shapely_box(x=coco_bbox[0], y=coco_bbox[1], width=coco_bbox[2], height=coco_bbox[3])
         shapely_annotation = ShapelyAnnotation.from_coco_bbox(coco_bbox)
 
         # test conversion methods
         coco_segmentation = shapely_annotation.to_coco_segmentation()
         self.assertEqual(
-            coco_segmentation,
-            [[101, 1, 101, 101, 1, 101, 1, 1]],
+            coco_segmentation, [[101, 1, 101, 101, 1, 101, 1, 1]],
         )
         opencv_contours = shapely_annotation.to_opencv_contours()
         self.assertEqual(
-            opencv_contours,
-            [
-                [
-                    [[101, 1]],
-                    [[101, 101]],
-                    [[1, 101]],
-                    [[1, 1]],
-                    [[101, 1]],
-                ]
-            ],
+            opencv_contours, [[[[101, 1]], [[101, 101]], [[1, 101]], [[1, 1]], [[101, 1]],]],
         )
         coco_bbox = shapely_annotation.to_coco_bbox()
         self.assertEqual(
-            coco_bbox,
-            [1, 1, 100, 100],
+            coco_bbox, [1, 1, 100, 100],
         )
         voc_bbox = shapely_annotation.to_voc_bbox()
         self.assertEqual(
-            voc_bbox,
-            [1, 1, 101, 101],
+            voc_bbox, [1, 1, 101, 101],
         )
 
         # test properties
         self.assertEqual(
-            shapely_annotation.area,
-            MultiPolygon([shapely_polygon]).area,
+            shapely_annotation.area, MultiPolygon([shapely_polygon]).area,
         )
         self.assertEqual(
-            shapely_annotation.multipolygon,
-            MultiPolygon([shapely_polygon]),
+            shapely_annotation.multipolygon, MultiPolygon([shapely_polygon]),
         )
 
     def test_get_intersection(self):
@@ -133,9 +100,7 @@ def test_get_intersection(self):
         coco_segmentation = [[1, 1, 325, 125, 250, 200, 5, 200]]
         shapely_annotation = ShapelyAnnotation.from_coco_segmentation(coco_segmentation)
 
-        intersection_shapely_annotation = shapely_annotation.get_intersection(
-            shapely_box
-        )
+        intersection_shapely_annotation = shapely_annotation.get_intersection(shapely_box)
 
         test_list = intersection_shapely_annotation.to_list()[0]
         true_list = [(256, 97), (0, 0), (4, 199), (249, 199), (256, 192), (256, 97)]
@@ -144,30 +109,12 @@
                 self.assertEqual(int(test_list[i][j]), int(true_list[i][j]))
 
         self.assertEqual(
-            intersection_shapely_annotation.to_coco_segmentation(),
-            [
-                [
-                    256,
-                    97,
-                    0,
-                    0,
-                    4,
-                    199,
-                    249,
-                    199,
-                    256,
-                    192,
-                ]
-            ],
+            intersection_shapely_annotation.to_coco_segmentation(), [[256, 97, 0, 0, 4, 199, 249, 199, 256, 192,]],
         )
 
-        self.assertEqual(
-            intersection_shapely_annotation.to_coco_bbox(), [0, 0, 256, 199]
-        )
+        self.assertEqual(intersection_shapely_annotation.to_coco_bbox(), [0, 0, 256, 199])
 
-        self.assertEqual(
-            intersection_shapely_annotation.to_voc_bbox(), [0, 0, 256, 199]
-        )
+        self.assertEqual(intersection_shapely_annotation.to_voc_bbox(), [0, 0, 256, 199])
 
     def test_get_empty_intersection(self):
@@ -176,9 +123,7 @@ def test_get_empty_intersection(self):
         x, y, width, height = 300, 300, 256, 256
         shapely_box = get_shapely_box(x, y, width, height)
 
         coco_segmentation = [[1, 1, 325, 125, 250, 200, 5, 200]]
         shapely_annotation = ShapelyAnnotation.from_coco_segmentation(coco_segmentation)
 
-        intersection_shapely_annotation = shapely_annotation.get_intersection(
-            shapely_box
-        )
+        intersection_shapely_annotation = shapely_annotation.get_intersection(shapely_box)
 
         self.assertEqual(intersection_shapely_annotation.area, 0)
diff --git a/tests/test_slicing.py b/tests/test_slicing.py
index 360279fea..8a7e6ccd0 100644
--- a/tests/test_slicing.py
+++ b/tests/test_slicing.py
@@ -38,8 +38,7 @@ def test_slice_image(self):
         self.assertEqual(slice_image_result.coco_images[0].annotations, [])
         self.assertEqual(slice_image_result.coco_images[15].annotations[1].area, 7296)
         self.assertEqual(
-            slice_image_result.coco_images[15].annotations[1].bbox,
-            [17, 186, 48, 152],
+            slice_image_result.coco_images[15].annotations[1].bbox, [17, 186, 48, 152],
         )
 
         image_cv = read_image(image_path)
@@ -62,8 +61,7 @@
         self.assertEqual(slice_image_result.coco_images[0].annotations, [])
         self.assertEqual(slice_image_result.coco_images[15].annotations[1].area, 7296)
         self.assertEqual(
-            slice_image_result.coco_images[15].annotations[1].bbox,
-            [17, 186, 48, 152],
+            slice_image_result.coco_images[15].annotations[1].bbox, [17, 186, 48, 152],
         )
 
         image_pil = Image.open(image_path)
@@ -86,8 +84,7 @@
         self.assertEqual(slice_image_result.coco_images[0].annotations, [])
         self.assertEqual(slice_image_result.coco_images[15].annotations[1].area, 7296)
         self.assertEqual(
-            slice_image_result.coco_images[15].annotations[1].bbox,
-            [17, 186, 48, 152],
+            slice_image_result.coco_images[15].annotations[1].bbox, [17, 186, 48, 152],
         )
 
     def test_slice_coco(self):
@@ -122,8 +119,7 @@ def test_slice_coco(self):
         self.assertEqual(coco_dict["annotations"][2]["category_id"], 1)
         self.assertEqual(coco_dict["annotations"][2]["area"], 12483)
         self.assertEqual(
-            coco_dict["annotations"][2]["bbox"],
-            [340, 204, 73, 171],
+            coco_dict["annotations"][2]["bbox"], [340, 204, 73, 171],
         )
 
         shutil.rmtree(output_dir)
@@ -157,8 +153,7 @@
         self.assertEqual(coco_dict["annotations"][2]["category_id"], 1)
         self.assertEqual(coco_dict["annotations"][2]["area"], 12483)
         self.assertEqual(
-            coco_dict["annotations"][2]["bbox"],
-            [340, 204, 73, 171],
+            coco_dict["annotations"][2]["bbox"], [340, 204, 73, 171],
         )
 
         shutil.rmtree(output_dir)
diff --git a/tests/test_yolov5model.py b/tests/test_yolov5model.py
index e8279f288..aa111d5c7 100644
--- a/tests/test_yolov5model.py
+++ b/tests/test_yolov5model.py
@@ -103,13 +103,10 @@ def test_convert_original_predictions(self):
         self.assertEqual(object_prediction_list[5].category.id, 2)
         self.assertEqual(object_prediction_list[5].category.name, "car")
         self.assertEqual(
-            object_prediction_list[5].bbox.to_coco_bbox(),
-            [617, 195, 24, 23],
+            object_prediction_list[5].bbox.to_coco_bbox(), [617, 195, 24, 23],
         )
 
-    def test_create_original_predictions_from_object_prediction_list(
-        self,
-    ):
+    def test_create_original_predictions_from_object_prediction_list(self,):
         pass
         # TODO: implement object_prediction_list to yolov5 format conversion
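Taken as a whole, the pattern in this patch (single quotes normalized to double quotes, short call sites collapsed up to a wider line limit) is what a black-style formatter with a raised line length produces; the exact tool and settings are not recorded in the diff itself, so treat the following as an assumption. A minimal sketch using black's Python API:

import black

# A fragment written in the old style, as on the `-` lines above.
src = "optimizer = dict(\n    type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001\n)\n"

# With a 120-character limit the call collapses onto one line and the
# string quotes are normalized, matching the `+` lines in this patch.
formatted = black.format_str(src, mode=black.FileMode(line_length=120))
print(formatted)  # optimizer = dict(type="SGD", lr=0.02, momentum=0.9, weight_decay=0.0001)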