yjh0410, 1 year ago
Parent
Current commit
9fcfcadcae

+ 1 - 99
odlab/config/detr_config.py

@@ -1,4 +1,4 @@
-# DETR
+# End-to-End Object Detection with Transformers
 
 def build_detr_config(args):
     if   args.model == 'detr_r50':
@@ -21,101 +21,3 @@ class Detr_R50_Config(DetrBaseConfig):
         super().__init__()
         ## Backbone
         pass
-
-
-detr_cfg = {
-    'detr_r50':{
-        # ---------------- Model config ----------------
-        ## Model scale
-        # Backbone
-        'backbone': 'resnet50',
-        'backbone_norm': 'FrozeBN',
-        'res5_dilation': False,
-        'pretrained': True,
-        'pretrained_weight': 'spark_resnet50',  # Cls: imagenet1k_v2; MIM: spark_resnet50
-        'freeze_at': 1,  # freeze stem layer + layer1 of the backbone
-        'max_stride': 32,
-        'out_stride': 16,
-        # Transformer Ecndoer
-        'hidden_dim': 256,
-        'en_num_heads': 8,
-        'en_num_layers': 6,
-        'en_ffn_dim': 2048,
-        'en_dropout': 0.1,
-        'en_act': 'gelu',
-        'en_pre_norm': True,
-        # Transformer Decoder
-        'transformer': 'detr_transformer',
-        'de_num_heads': 8,
-        'de_num_layers': 6,
-        'de_ffn_dim': 2048,
-        'de_dropout': 0.0,
-        'de_act': 'gelu',
-        'de_pre_norm': True,
-        'rpe_hidden_dim': 512,
-        'use_checkpoint': False,
-        'proposal_feature_levels': 3,
-        'proposal_tgt_strides': [8, 16, 32],
-        'num_queries_one2one': 300,
-        'num_queries_one2many': 1500,
-        # Post process
-        'train_topk': 300,
-        'train_conf_thresh': 0.001,
-        'train_nms_thresh': 0.5,
-        'test_topk': 300,
-        'test_conf_thresh': 0.001,
-        'test_nms_thresh': 0.5,
-        'nms_class_agnostic': True,  # We prefer to use class-agnostic NMS in the demo.
-        # ---------------- Assignment config ----------------
-        'matcher_hpy': {'cost_class': 2.0,
-                        'cost_bbox': 1.0,
-                        'cost_giou': 2.0,},
-        # ---------------- Loss config ----------------
-        'k_one2many': 6,
-        'lambda_one2many': 1.0,
-        'loss_coeff': {'class': 2,
-                       'bbox': 1,
-                       'giou': 2,},
-        # ----------------- Training -----------------
-        ## Optimizer
-        'optimizer': 'adamw',
-        'base_lr': 0.0002 / 16,
-        'backbone_lr_ratio': 0.1,
-        'momentum': None,
-        'weight_decay': 0.05,
-        'clip_max_norm': 0.1,
-        ## Params dict
-        'param_dict_type': 'detr',
-        'lr_backbone_names': ['backbone',],
-        'lr_linear_proj_names': ["reference_points", "sampling_offsets",],  # These two names are not required by PlainDETR
-        'lr_linear_proj_mult': 0.1,
-        'wd_norm_names': ["norm", "bias", "rpb_mlp", "cpb_mlp", "level_embed",],
-        'wd_norm_mult': 0.0,
-        ## LR Scheduler
-        'lr_scheduler': 'step',
-        'warmup': 'linear',
-        'warmup_iters': 1000,
-        'warmup_factor': 0.00066667,
-        ## Training scheduler
-        'scheduler': '1x',
-        'max_epoch': 12,      # 1x
-        'lr_epoch': [11],     # 1x
-        # ----------------- Input -----------------
-        ## Transforms
-        'train_min_size': [800],   # short edge of image
-        'train_min_size2': [400, 500, 600],
-        'train_max_size': 1333,
-        'test_min_size': [800],
-        'test_max_size': 1333,
-        'random_crop_size': [320, 600],
-        ## Pixel mean & std
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
-        ## Transforms
-        'detr_style': True,
-        'trans_config': None,
-        'box_format': 'xywh',
-        'normalize_coords': False,
-    },
-
-}
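
Note: with the dict-based detr_cfg removed, Detr_R50_Config still only contains `pass`, so presumably the removed keys will be ported onto DetrBaseConfig attributes in the same style as the FCOS and YOLOF configs below. A minimal sketch of that pattern using a handful of the removed keys (the attribute names are assumptions, not part of this commit):

    class DetrBaseConfig(object):
        def __init__(self):
            # --------- Backbone ---------
            self.backbone   = "resnet50"
            self.bk_norm    = "FrozeBN"
            self.out_stride = 16
            # --------- Transformer ---------
            self.hidden_dim    = 256
            self.en_num_layers = 6
            self.de_num_layers = 6
            self.num_queries_one2one = 300

    class Detr_R50_Config(DetrBaseConfig):
        def __init__(self):
            super().__init__()
            self.backbone = "resnet50"   # per-model overrides go here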

+ 170 - 313
odlab/config/fcos_config.py

@@ -3,12 +3,101 @@
 def build_fcos_config(args):
     if   args.model == 'fcos_r18_1x':
         return Fcos_R18_1x_Config()
+    elif args.model == 'fcos_r50_1x':
+        return Fcos_R50_1x_Config()
+    elif args.model == 'fcos_rt_r18_1x':
+        return FcosRT_R18_1x_Config()
+    elif args.model == 'fcos_rt_r50_1x':
+        return FcosRT_R50_1x_Config()
     else:
         raise NotImplementedError("No config for model: {}".format(args.model))
 
 class FcosBaseConfig(object):
     def __init__(self):
-        pass
+        # --------- Backbone ---------
+        self.backbone = "resnet50"
+        self.bk_norm  = "FrozeBN"
+        self.res5_dilation = False
+        self.use_pretrained = True
+        self.freeze_at = 1
+        self.max_stride = 128
+        self.out_stride = [8, 16, 32, 64, 128]
+
+        # --------- Neck ---------
+        self.neck = 'basic_fpn'
+        self.fpn_p6_feat = True
+        self.fpn_p7_feat = True
+        self.fpn_p6_from_c5  = False
+
+        # --------- Head ---------
+        self.head = 'fcos_head'
+        self.head_dim = 256
+        self.num_cls_head = 4
+        self.num_reg_head = 4
+        self.head_act     = 'relu'
+        self.head_norm    = 'GN'
+
+        # --------- Post-process ---------
+        self.train_topk = 1000
+        self.train_conf_thresh = 0.05
+        self.train_nms_thresh  = 0.6
+        self.test_topk = 100
+        self.test_conf_thresh = 0.5
+        self.test_nms_thresh  = 0.45
+        self.nms_class_agnostic = True
+
+        # --------- Label Assignment ---------
+        self.matcher = 'fcos_matcher'
+        self.matcher_hpy = {'center_sampling_radius': 1.5,
+                            'object_sizes_of_interest': [[-1, 64],
+                                                         [64, 128],
+                                                         [128, 256],
+                                                         [256, 512],
+                                                         [512, float('inf')]]
+                            }
+
+        # --------- Loss weight ---------
+        self.focal_loss_alpha = 0.25
+        self.focal_loss_gamma = 2.0
+        self.loss_cls_weight  = 1.0
+        self.loss_reg_weight  = 1.0
+        self.loss_ctn_weight  = 1.0
+
+        # --------- Optimizer ---------
+        self.optimizer = 'sgd'
+        self.per_image_lr  = 0.01 / 16
+        self.bk_lr_ratio   = 1.0 / 1.0
+        self.momentum      = 0.9
+        self.weight_decay  = 1e-4
+        self.clip_max_norm = -1.0
+
+        # --------- LR Scheduler ---------
+        self.lr_scheduler = 'step'
+        self.warmup = 'linear'
+        self.warmup_iters = 500
+        self.warmup_factor = 0.00066667
+
+        # --------- Train epoch ---------
+        self.max_epoch = 12        # 1x
+        self.lr_epoch  = [8, 11]   # 1x
+
+        # --------- Data process ---------
+        ## input size
+        self.train_min_size = [800]   # short edge of image
+        self.train_max_size = 1333
+        self.test_min_size  = [800]
+        self.test_max_size  = 1333
+        ## Pixel mean & std
+        self.pixel_mean = [0.485, 0.456, 0.406]
+        self.pixel_std  = [0.229, 0.224, 0.225]
+        ## Transforms
+        self.box_format = 'xyxy'
+        self.normalize_coords = False
+        self.detr_style = False
+        self.trans_config = [
+            {'name': 'RandomHFlip'},
+            {'name': 'RandomResize'},
+        ]
 
     def print_config(self):
         config_dict = {key: value for key, value in self.__dict__.items() if not key.startswith('__')}
@@ -19,330 +108,98 @@ class Fcos_R18_1x_Config(FcosBaseConfig):
     def __init__(self) -> None:
         super().__init__()
         ## Backbone
-        pass
-
+        self.backbone = "resnet18"
 
-fcos_cfg = {
-    'fcos_r18_1x':{
-        # ----------------- Model-----------------
+class Fcos_R50_1x_Config(FcosBaseConfig):
+    def __init__(self) -> None:
+        super().__init__()
         ## Backbone
-        'backbone': 'resnet18',
-        'backbone_norm': 'FrozeBN',
-        'res5_dilation': False,
-        'pretrained': True,
-        'freeze_at': 1,  # freeze stem layer + layer1 of the backbone
-        'pretrained_weight': 'imagenet1k_v1',
-        'max_stride': 128,
-        'out_stride': [8, 16, 32, 64, 128],
-        ## Neck
-        'neck': 'basic_fpn',
-        'fpn_p6_feat': True,
-        'fpn_p7_feat': True,
-        'fpn_p6_from_c5': False,
-        ## Head
-        'head': 'fcos_head',
-        'head_dim': 256,
-        'num_cls_head': 4,
-        'num_reg_head': 4,
-        'head_act': 'relu',
-        'head_norm': 'GN',
-        ## Post-process
-        'train_topk': 1000,
-        'train_conf_thresh': 0.05,
-        'train_nms_thresh': 0.6,
-        'test_topk': 100,
-        'test_conf_thresh': 0.5,
-        'test_nms_thresh': 0.45,
-        'nms_class_agnostic': True,  # We prefer to use class-agnostic NMS in the demo.
-        # ----------------- Label Assignment -----------------
-        'matcher': 'fcos_matcher',
-        'matcher_hpy':{'center_sampling_radius': 1.5,
-                       'object_sizes_of_interest': [[-1, 64], [64, 128], [128, 256], [256, 512], [512, float('inf')]]
-                       },
-        # ----------------- Loss weight -----------------
-        ## Loss hyper-parameters
-        'focal_loss_alpha': 0.25,
-        'focal_loss_gamma': 2.0,
-        'loss_cls_weight': 1.0,
-        'loss_reg_weight': 1.0,
-        'loss_ctn_weight': 1.0,
-        # ----------------- Training -----------------
-        ## Training scheduler
-        'scheduler': '1x',
-        ## Optimizer
-        'optimizer': 'sgd',
-        'base_lr': 0.01 / 16,
-        'backbone_lr_ratio': 1.0 / 1.0,
-        'momentum': 0.9,
-        'weight_decay': 1e-4,
-        'clip_max_norm': -1.0,
-        'param_dict_type': 'default',
-        ## LR Scheduler
-        'lr_scheduler': 'step',
-        'warmup': 'linear',
-        'warmup_iters': 500,
-        'warmup_factor': 0.00066667,
-        ## Epoch
-        'max_epoch': 12,      # 1x
-        'lr_epoch': [8, 11],  # 1x
-        # ----------------- Input -----------------
-        ## Transforms
-        'train_min_size': [800],   # short edge of image
-        'train_max_size': 1333,
-        'test_min_size': [800],
-        'test_max_size': 1333,
-        ## Pixel mean & std
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
-        ## Transforms
-        'detr_style': False,
-        'trans_config': [
-            {'name': 'RandomHFlip'},
-            {'name': 'RandomResize'},
-        ],
-        'box_format': 'xyxy',
-        'normalize_coords': False,
-    },
+        self.backbone = "resnet50"
 
-    'fcos_r50_1x':{
-        # ----------------- Model-----------------
+class FcosRT_R18_1x_Config(FcosBaseConfig):
+    def __init__(self) -> None:
+        super().__init__()
         ## Backbone
-        'backbone': 'resnet50',
-        'backbone_norm': 'FrozeBN',
-        'res5_dilation': False,
-        'pretrained': True,
-        'freeze_at': 1,  # freeze stem layer + layer1 of the backbone
-        'pretrained_weight': 'imagenet1k_v1',
-        'max_stride': 128,
-        'out_stride': [8, 16, 32, 64, 128],
-        ## Neck
-        'neck': 'basic_fpn',
-        'fpn_p6_feat': True,
-        'fpn_p7_feat': True,
-        'fpn_p6_from_c5': False,
-        ## Head
-        'head': 'fcos_head',
-        'head_dim': 256,
-        'num_cls_head': 4,
-        'num_reg_head': 4,
-        'head_act': 'relu',
-        'head_norm': 'GN',
-        ## Post-process
-        'train_topk': 1000,
-        'train_conf_thresh': 0.05,
-        'train_nms_thresh': 0.65,
-        'test_topk': 100,
-        'test_conf_thresh': 0.5,
-        'test_nms_thresh': 0.45,
-        'nms_class_agnostic': True,  # We prefer to use class-agnostic NMS in the demo.
-        # ----------------- Label Assignment -----------------
-        'matcher': 'fcos_matcher',
-        'matcher_hpy':{'center_sampling_radius': 1.5,
-                       'object_sizes_of_interest': [[-1, 64], [64, 128], [128, 256], [256, 512], [512, float('inf')]]
-                       },
-        # ----------------- Loss weight -----------------
-        ## Loss hyper-parameters
-        'focal_loss_alpha': 0.25,
-        'focal_loss_gamma': 2.0,
-        'loss_cls_weight': 1.0,
-        'loss_reg_weight': 1.0,
-        'loss_ctn_weight': 1.0,
-        # ----------------- Training -----------------
-        ## Training scheduler
-        'scheduler': '1x',
-        ## Optimizer
-        'optimizer': 'sgd',
-        'base_lr': 0.01 / 16,
-        'backbone_lr_ratio': 1.0 / 1.0,
-        'momentum': 0.9,
-        'weight_decay': 1e-4,
-        'clip_max_norm': -1.0,
-        'param_dict_type': 'default',
-        ## LR Scheduler
-        'lr_scheduler': 'step',
-        'warmup': 'linear',
-        'warmup_iters': 500,
-        'warmup_factor': 0.00066667,
-        ## Epoch
-        'max_epoch': 12,      # 1x
-        'lr_epoch': [8, 11],  # 1x
-        # ----------------- Input -----------------
-        ## Transforms
-        'train_min_size': [800],   # short edge of image
-        'train_max_size': 1333,
-        'test_min_size': [800],
-        'test_max_size': 1333,
-        ## Pixel mean & std
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
-        ## Transforms
-        'detr_style': False,
-        'trans_config': [
-            {'name': 'RandomHFlip'},
-            {'name': 'RandomResize'},
-        ],
-        'box_format': 'xyxy',
-        'normalize_coords': False,
-    },
+        self.backbone = "resnet18"
+        self.max_stride = 32
+        self.out_stride = [8, 16, 32]
 
-    'fcos_rt_r18_1x':{
-        # ----------------- Model-----------------
-        ## Backbone
-        'backbone': 'resnet18',
-        'backbone_norm': 'FrozeBN',
-        'res5_dilation': False,
-        'pretrained': True,
-        'freeze_at': 1,  # freeze stem layer + layer1 of the backbone
-        'pretrained_weight': 'imagenet1k_v1',
-        'max_stride': 32,
-        'out_stride': [8, 16, 32],
-        ## Neck
-        'neck': 'basic_fpn',
-        'fpn_p6_feat': False,
-        'fpn_p7_feat': False,
-        'fpn_p6_from_c5': False,
-        ## Head
-        'head': 'fcos_head',
-        'head_dim': 256,
-        'num_cls_head': 4,
-        'num_reg_head': 4,
-        'head_act': 'relu',
-        'head_norm': 'GN',
-        ## Post-process
-        'train_topk': 1000,
-        'train_conf_thresh': 0.05,
-        'train_nms_thresh': 0.6,
-        'test_topk': 100,
-        'test_conf_thresh': 0.5,
-        'test_nms_thresh': 0.45,
-        'nms_class_agnostic': True,  # We prefer to use class-agnostic NMS in the demo.
-        # ----------------- Label Assignment -----------------
-        'matcher': 'fcos_matcher',
-        'matcher_hpy':{'center_sampling_radius': 1.5,
-                       'object_sizes_of_interest': [[-1, 64], [64, 128], [128, float('inf')]]
-                       },
-        # ----------------- Loss weight -----------------
-        ## Loss hyper-parameters
-        'focal_loss_alpha': 0.25,
-        'focal_loss_gamma': 2.0,
-        'loss_cls_weight': 1.0,
-        'loss_reg_weight': 1.0,
-        'loss_ctn_weight': 1.0,
-        # ----------------- Training -----------------
-        ## Training scheduler
-        'scheduler': '1x',
-        ## Optimizer
-        'optimizer': 'sgd',
-        'base_lr': 0.01 / 16,
-        'backbone_lr_ratio': 1.0 / 1.0,
-        'momentum': 0.9,
-        'weight_decay': 1e-4,
-        'clip_max_norm': -1.0,
-        ## LR Scheduler
-        'lr_scheduler': 'step',
-        'warmup': 'linear',
-        'warmup_iters': 500,
-        'warmup_factor': 0.00066667,
-        ## Epoch
-        'max_epoch': 36,       # 1x
-        'lr_epoch': [24, 33],  # 1x
-        # ----------------- Input -----------------
-        ## Transforms
-        'train_min_size': [256, 288, 320, 352, 384, 416, 448, 480, 512, 544, 576, 608],   # short edge of image
-        'train_max_size': 900,
-        'test_min_size': [512],
-        'test_max_size': 736,
+        # --------- Neck ---------
+        self.neck = 'basic_fpn'
+        self.fpn_p6_feat = False
+        self.fpn_p7_feat = False
+        self.fpn_p6_from_c5  = False
+
+        # --------- Label Assignment ---------
+        self.matcher = 'fcos_matcher'
+        self.matcher_hpy = {'center_sampling_radius': 1.5,
+                            'object_sizes_of_interest': [[-1, 64],
+                                                         [64, 128],
+                                                         [128, float('inf')]]
+                            }
+
+        # --------- Train epoch ---------
+        self.max_epoch = 36         # 3x
+        self.lr_epoch  = [24, 33]   # 3x
+
+        # --------- Data process ---------
+        ## input size
+        self.train_min_size = [256, 288, 320, 352, 384, 416, 448, 480, 512, 544, 576, 608]   # short edge of image
+        self.train_max_size = 900
+        self.test_min_size  = [512]
+        self.test_max_size  = 736
         ## Pixel mean & std
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
+        self.pixel_mean = [0.485, 0.456, 0.406]
+        self.pixel_std  = [0.229, 0.224, 0.225]
         ## Transforms
-        'detr_style': False,
-        'trans_config': [
+        self.box_format = 'xyxy'
+        self.normalize_coords = False
+        self.detr_style = False
+        self.trans_config = [
             {'name': 'RandomHFlip'},
             {'name': 'RandomResize'},
-        ],
-        'box_format': 'xyxy',
-        'normalize_coords': False,
-    },
+        ]
 
-    'fcos_rt_r50_1x':{
-        # ----------------- Model-----------------
+class FcosRT_R50_1x_Config(FcosBaseConfig):
+    def __init__(self) -> None:
+        super().__init__()
         ## Backbone
-        'backbone': 'resnet50',
-        'backbone_norm': 'FrozeBN',
-        'res5_dilation': False,
-        'pretrained': True,
-        'freeze_at': 1,  # freeze stem layer + layer1 of the backbone
-        'pretrained_weight': 'imagenet1k_v1',
-        'max_stride': 32,
-        'out_stride': [8, 16, 32],
-        ## Neck
-        'neck': 'basic_fpn',
-        'fpn_p6_feat': False,
-        'fpn_p7_feat': False,
-        'fpn_p6_from_c5': False,
-        ## Head
-        'head': 'fcos_head',
-        'head_dim': 256,
-        'num_cls_head': 4,
-        'num_reg_head': 4,
-        'head_act': 'relu',
-        'head_norm': 'GN',
-        ## Post-process
-        'train_topk': 1000,
-        'train_conf_thresh': 0.05,
-        'train_nms_thresh': 0.6,
-        'test_topk': 100,
-        'test_conf_thresh': 0.5,
-        'test_nms_thresh': 0.45,
-        'nms_class_agnostic': True,  # We prefer to use class-agnostic NMS in the demo.
-        # ----------------- Label Assignment -----------------
-        'matcher': 'fcos_matcher',
-        'matcher_hpy':{'center_sampling_radius': 1.5,
-                       'object_sizes_of_interest': [[-1, 64], [64, 128], [128, float('inf')]]
-                       },
-        # ----------------- Loss weight -----------------
-        ## Loss hyper-parameters
-        'focal_loss_alpha': 0.25,
-        'focal_loss_gamma': 2.0,
-        'loss_cls_weight': 1.0,
-        'loss_reg_weight': 1.0,
-        'loss_ctn_weight': 1.0,
-        # ----------------- Training -----------------
-        ## Training scheduler
-        'scheduler': '1x',
-        ## Optimizer
-        'optimizer': 'sgd',
-        'base_lr': 0.01 / 16,
-        'backbone_lr_ratio': 1.0 / 1.0,
-        'momentum': 0.9,
-        'weight_decay': 1e-4,
-        'clip_max_norm': -1.0,
-        ## LR Scheduler
-        'lr_scheduler': 'step',
-        'warmup': 'linear',
-        'warmup_iters': 500,
-        'warmup_factor': 0.00066667,
-        ## Epoch
-        'max_epoch': 36,       # 1x
-        'lr_epoch': [24, 33],  # 1x
-        # ----------------- Input -----------------
-        ## Transforms
-        'train_min_size': [256, 288, 320, 352, 384, 416, 448, 480, 512, 544, 576, 608],   # short edge of image
-        'train_max_size': 900,
-        'test_min_size': [512],
-        'test_max_size': 736,
+        self.backbone = "resnet50"
+        self.max_stride = 32
+        self.out_stride = [8, 16, 32]
+
+        # --------- Neck ---------
+        self.neck = 'basic_fpn'
+        self.fpn_p6_feat = False
+        self.fpn_p7_feat = False
+        self.fpn_p6_from_c5  = False
+
+        # --------- Label Assignment ---------
+        self.matcher = 'fcos_matcher'
+        self.matcher_hpy = {'center_sampling_radius': 1.5,
+                            'object_sizes_of_interest': [[-1, 64],
+                                                         [64, 128],
+                                                         [128, float('inf')]]
+                            }
+
+        # --------- Train epoch ---------
+        self.max_epoch = 36         # 3x
+        self.lr_epoch  = [24, 33]   # 3x
+
+        # --------- Data process ---------
+        ## input size
+        self.train_min_size = [256, 288, 320, 352, 384, 416, 448, 480, 512, 544, 576, 608]   # short edge of image
+        self.train_max_size = 900
+        self.test_min_size  = [512]
+        self.test_max_size  = 736
         ## Pixel mean & std
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
+        self.pixel_mean = [0.485, 0.456, 0.406]
+        self.pixel_std  = [0.229, 0.224, 0.225]
         ## Transforms
-        'detr_style': False,
-        'trans_config': [
+        self.box_format = 'xyxy'
+        self.normalize_coords = False
+        self.detr_style = False
+        self.trans_config = [
             {'name': 'RandomHFlip'},
             {'name': 'RandomResize'},
-        ],
-        'box_format': 'xyxy',
-        'normalize_coords': False,
-    },
-
-}
+        ]
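
Note: downstream code in this commit (transforms.py, the detector builders, main.py) now reads these settings as attributes rather than dict keys. A quick usage sketch of the new FCOS configs (the import path is an assumption; a SimpleNamespace stands in for the usual argparse namespace):

    from types import SimpleNamespace
    from config.fcos_config import build_fcos_config   # import path assumed

    args = SimpleNamespace(model='fcos_rt_r50_1x')
    cfg  = build_fcos_config(args)
    cfg.print_config()             # dump every attribute for inspection

    # attribute access replaces the old dict lookups:
    lr  = cfg.per_image_lr         # was cfg['base_lr']
    fmt = cfg.box_format           # was cfg['box_format']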

+ 130 - 257
odlab/config/yolof_config.py

@@ -1,14 +1,106 @@
-# Fully Convolutional One-Stage object detector
+# You Only Look One-level Feature
 
 def build_yolof_config(args):
     if   args.model == 'yolof_r18_c5_1x':
         return Yolof_R18_C5_1x_Config()
+    elif args.model == 'yolof_r50_c5_1x':
+        return Yolof_R50_C5_1x_Config()
+    elif args.model == 'yolof_r50_dc5_1x':
+        return Yolof_R50_DC5_1x_Config()
     else:
         raise NotImplementedError("No config for model: {}".format(args.model))
 
 class YolofBaseConfig(object):
     def __init__(self):
-        pass
+        # --------- Backbone ---------
+        self.backbone = "resnet50"
+        self.bk_norm  = "FrozeBN"
+        self.res5_dilation = False
+        self.use_pretrained = True
+        self.freeze_at = 1
+        self.max_stride = 32
+        self.out_stride = 32
+
+        # --------- Neck ---------
+        self.neck = 'dilated_encoder'
+        self.neck_dilations = [2, 4, 6, 8]
+        self.neck_expand_ratio = 0.25
+        self.neck_act = 'relu'
+        self.neck_norm = 'GN'
+
+        # --------- Head ---------
+        self.head = 'yolof_head'
+        self.head_dim = 512
+        self.num_cls_head = 2
+        self.num_reg_head = 4
+        self.head_act     = 'relu'
+        self.head_norm    = 'GN'
+        self.center_clamp = 32
+        self.anchor_size  = [[32, 32],
+                             [64, 64],
+                             [128, 128],
+                             [256, 256],
+                             [512, 512]]
+
+        # --------- Post-process ---------
+        self.train_topk = 1000
+        self.train_conf_thresh = 0.05
+        self.train_nms_thresh  = 0.6
+        self.test_topk = 300
+        self.test_conf_thresh = 0.3
+        self.test_nms_thresh  = 0.45
+        self.nms_class_agnostic = True
+
+        # --------- Label Assignment ---------
+        self.matcher = 'yolof_matcher'
+        self.matcher_hpy = {'topk_candidates': 4,
+                            'iou_thresh': 0.15,
+                            'ignore_thresh': 0.7,
+                              }
+
+        # --------- Loss weight ---------
+        self.focal_loss_alpha = 0.25
+        self.focal_loss_gamma = 2.0
+        self.loss_cls_weight  = 1.0
+        self.loss_reg_weight  = 1.0
+
+        # --------- Optimizer ---------
+        self.optimizer = 'sgd'
+        self.per_image_lr  = 0.12 / 64
+        self.bk_lr_ratio   = 1.0 / 3.0
+        self.momentum      = 0.9
+        self.weight_decay  = 1e-4
+        self.clip_max_norm = 10.0
+
+        # --------- LR Scheduler ---------
+        self.lr_scheduler = 'step'
+        self.warmup = 'linear'
+        self.warmup_iters = 1500
+        self.warmup_factor = 0.00066667
+
+        # --------- Train epoch ---------
+        self.max_epoch = 12        # 1x
+        self.lr_epoch  = [8, 11]   # 1x
+
+        # --------- Data process ---------
+        ## input size
+        self.train_min_size = [800]   # short edge of image
+        self.train_max_size = 1333
+        self.test_min_size  = [800]
+        self.test_max_size  = 1333
+        ## Pixel mean & std
+        self.pixel_mean = [0.485, 0.456, 0.406]
+        self.pixel_std  = [0.229, 0.224, 0.225]
+        ## Transforms
+        self.box_format = 'xyxy'
+        self.normalize_coords = False
+        self.detr_style = False
+        self.trans_config = [
+            {'name': 'RandomHFlip'},
+            {'name': 'RandomResize'},
+            {'name': 'RandomShift', 'max_shift': 32},
+        ]
 
     def print_config(self):
         config_dict = {key: value for key, value in self.__dict__.items() if not key.startswith('__')}
@@ -19,264 +111,45 @@ class Yolof_R18_C5_1x_Config(YolofBaseConfig):
     def __init__(self) -> None:
         super().__init__()
         ## Backbone
-        pass
+        self.backbone = "resnet18"
 
-yolof_cfg = {
-    # --------------- C5 level ---------------
-    'yolof_r18_c5_1x':{
-        # ----------------- Model-----------------
+class Yolof_R50_C5_1x_Config(YolofBaseConfig):
+    def __init__(self) -> None:
+        super().__init__()
         ## Backbone
-        'backbone': 'resnet18',
-        'backbone_norm': 'FrozeBN',
-        'res5_dilation': False,
-        'pretrained': True,
-        'pretrained_weight': 'imagenet1k_v1',
-        'freeze_at': 1,  # freeze stem layer + layer1 of the backbone
-        'max_stride': 32,
-        'out_stride': 32,
-        ## Neck
-        'neck': 'dilated_encoder',
-        'neck_dilations': [2, 4, 6, 8],
-        'neck_expand_ratio': 0.25,
-        'neck_act': 'relu',
-        'neck_norm': 'BN',
-        ## Head
-        'head': 'yolof_head',
-        'head_dim': 512,
-        'num_cls_head': 2,
-        'num_reg_head': 4,
-        'head_act': 'relu',
-        'head_norm': 'BN',
-        'center_clamp': 32,         
-        'anchor_size': [[32, 32], [64, 64], [128, 128], [256, 256], [512, 512]],
-        ## Post-process
-        'train_topk': 1000,
-        'train_conf_thresh': 0.05,
-        'train_nms_thresh': 0.6,
-        'test_topk': 300,
-        'test_conf_thresh': 0.3,
-        'test_nms_thresh': 0.45,
-        'nms_class_agnostic': True,  # We prefer to use class-agnostic NMS in the demo.
-        # ----------------- Label Assignment -----------------
-        'matcher': 'yolof_matcher',
-        'matcher_hpy': {'topk_candidates': 4,
-                        'iou_thresh': 0.15,
-                        'ignore_thresh': 0.7,
-                        },
-        # ----------------- Loss weight -----------------
-        ## Loss hyper-parameters
-        'focal_loss_alpha': 0.25,
-        'focal_loss_gamma': 2.0,
-        'loss_cls_weight': 1.0,
-        'loss_reg_weight': 1.0,
-        # ----------------- Training -----------------
-        ## Training scheduler
-        'scheduler': '1x',
-        ## Optimizer
-        'optimizer': 'sgd',
-        'base_lr': 0.12 / 64,
-        'backbone_lr_ratio': 1.0 / 3.0,
-        'momentum': 0.9,
-        'weight_decay': 1e-4,
-        'clip_max_norm': 10.0,
-        'param_dict_type': 'default',
-        ## LR Scheduler
-        'lr_scheduler': 'step',
-        'warmup': 'linear',
-        'warmup_iters': 1500,
-        'warmup_factor': 0.00066667,
-        ## Epoch
-        'max_epoch': 12,      # 1x
-        'lr_epoch': [8, 11],  # 1x
-        # ----------------- Input -----------------
-        ## Transforms
-        'train_min_size': [800],   # short edge of image
-        'train_max_size': 1333,
-        'test_min_size': [800],
-        'test_max_size': 1333,
-        ## Pixel mean & std
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
-        ## Transforms
-        'detr_style': False,
-        'trans_config': [
-            {'name': 'RandomHFlip'},
-            {'name': 'RandomResize'},
-            {'name': 'RandomShift', 'max_shift': 32},
-        ],
-        'box_format': 'xyxy',
-        'normalize_coords': False,
-    },
+        self.backbone = "resnet50"
 
-    'yolof_r50_c5_1x':{
-        # ----------------- Model-----------------
+class Yolof_R50_DC5_1x_Config(YolofBaseConfig):
+    def __init__(self) -> None:
+        super().__init__()
         ## Backbone
-        'backbone': 'resnet50',
-        'backbone_norm': 'FrozeBN',
-        'res5_dilation': False,
-        'pretrained': True,
-        'pretrained_weight': 'imagenet1k_v1',
-        'freeze_at': 1,  # freeze stem layer + layer1 of the backbone
-        'max_stride': 32,
-        'out_stride': 32,
-        ## Neck
-        'neck': 'dilated_encoder',
-        'neck_dilations': [2, 4, 6, 8],
-        'neck_expand_ratio': 0.25,
-        'neck_act': 'relu',
-        'neck_norm': 'BN',
-        ## Head
-        'head': 'yolof_head',
-        'head_dim': 512,
-        'num_cls_head': 2,
-        'num_reg_head': 4,
-        'head_act': 'relu',
-        'head_norm': 'BN',
-        'center_clamp': 32,         
-        'anchor_size': [[32, 32], [64, 64], [128, 128], [256, 256], [512, 512]],
-        ## Post-process
-        'train_topk': 1000,
-        'train_conf_thresh': 0.05,
-        'train_nms_thresh': 0.6,
-        'test_topk': 300,
-        'test_conf_thresh': 0.3,
-        'test_nms_thresh': 0.45,
-        'nms_class_agnostic': True,  # We prefer to use class-agnostic NMS in the demo.
-        # ----------------- Label Assignment -----------------
-        'matcher': 'yolof_matcher',
-        'matcher_hpy': {'topk_candidates': 4,
-                        'iou_thresh': 0.15,
-                        'ignore_thresh': 0.7,
-                        },
-        # ----------------- Loss weight -----------------
-        ## Loss hyper-parameters
-        'focal_loss_alpha': 0.25,
-        'focal_loss_gamma': 2.0,
-        'loss_cls_weight': 1.0,
-        'loss_reg_weight': 1.0,
-        # ----------------- Training -----------------
-        ## Training scheduler
-        'scheduler': '1x',
-        ## Optimizer
-        'optimizer': 'sgd',
-        'base_lr': 0.12 / 64,
-        'backbone_lr_ratio': 1.0 / 3.0,
-        'momentum': 0.9,
-        'weight_decay': 1e-4,
-        'clip_max_norm': 10.0,
-        'param_dict_type': 'default',
-        ## LR Scheduler
-        'lr_scheduler': 'step',
-        'warmup': 'linear',
-        'warmup_iters': 1500,
-        'warmup_factor': 0.00066667,
-        ## Epoch
-        'max_epoch': 12,      # 1x
-        'lr_epoch': [8, 11],  # 1x
-        # ----------------- Input -----------------
-        ## Transforms
-        'train_min_size': [800],   # short edge of image
-        'train_max_size': 1333,
-        'test_min_size': [800],
-        'test_max_size': 1333,
-        ## Pixel mean & std
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
-        ## Transforms
-        'detr_style': False,
-        'trans_config': [
-            {'name': 'RandomHFlip'},
-            {'name': 'RandomResize'},
-            {'name': 'RandomShift', 'max_shift': 32},
-        ],
-        'box_format': 'xyxy',
-        'normalize_coords': False,
-    },
+        self.backbone = "resnet50"
+        self.res5_dilation = True
+        self.use_pretrained = True
+        self.max_stride = 16
+        self.out_stride = 16
 
-    # --------------- Dilated C5 level ---------------
-    'yolof_r50_dc5_1x':{
-        # ----------------- Model-----------------
-        ## Backbone
-        'backbone': 'resnet50',
-        'backbone_norm': 'FrozeBN',
-        'res5_dilation': True,
-        'pretrained': True,
-        'pretrained_weight': 'imagenet1k_v1',
-        'freeze_at': 1,  # freeze stem layer + layer1 of the backbone
-        'max_stride': 16,
-        'out_stride': 16,
-        ## Neck
-        'neck': 'dilated_encoder',
-        'neck_dilations': [4, 8, 12, 16],
-        'neck_expand_ratio': 0.25,
-        'neck_act': 'relu',
-        'neck_norm': 'BN',
-        ## Head
-        'head': 'yolof_head',
-        'head_dim': 512,
-        'num_cls_head': 2,
-        'num_reg_head': 4,
-        'head_act': 'relu',
-        'head_norm': 'BN',
-        'center_clamp': 32,         
-        'anchor_size': [[16, 16], [32, 32], [64, 64], [128, 128], [256, 256], [512, 512]],
-        ## Post-process
-        'train_topk': 1000,
-        'train_conf_thresh': 0.05,
-        'train_nms_thresh': 0.6,
-        'test_topk': 300,
-        'test_conf_thresh': 0.3,
-        'test_nms_thresh': 0.45,
-        'nms_class_agnostic': True,  # We prefer to use class-agnostic NMS in the demo.
-        # ----------------- Label Assignment -----------------
-        'matcher': 'yolof_matcher',
-        'matcher_hpy': {'topk_candidates': 8,
-                        'iou_thresh': 0.1,
-                        'ignore_thresh': 0.7,
-                        },
-        # ----------------- Loss weight -----------------
-        ## Loss hyper-parameters
-        'focal_loss_alpha': 0.25,
-        'focal_loss_gamma': 2.0,
-        'loss_cls_weight': 1.0,
-        'loss_reg_weight': 1.0,
-        # ----------------- Training -----------------
-        ## Training scheduler
-        'scheduler': '1x',
-        ## Optimizer
-        'optimizer': 'sgd',
-        'base_lr': 0.12 / 64,
-        'backbone_lr_ratio': 1.0 / 3.0,
-        'momentum': 0.9,
-        'weight_decay': 1e-4,
-        'clip_max_norm': 10.0,
-        'param_dict_type': 'default',
-        ## LR Scheduler
-        'lr_scheduler': 'step',
-        'warmup': 'linear',
-        'warmup_iters': 1500,
-        'warmup_factor': 0.00066667,
-        ## Epoch
-        'max_epoch': 12,      # 1x
-        'lr_epoch': [8, 11],  # 1x
-        # ----------------- Input -----------------
-        ## Transforms
-        'train_min_size': [800],   # short edge of image
-        'train_max_size': 1333,
-        'test_min_size': [800],
-        'test_max_size': 1333,
-        ## Pixel mean & std
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
-        ## Transforms
-        'detr_style': False,
-        'trans_config': [
-            {'name': 'RandomHFlip'},
-            {'name': 'RandomResize'},
-            {'name': 'RandomShift', 'max_shift': 32},
-        ],
-        'box_format': 'xyxy',
-        'normalize_coords': False,
-    },
+        # --------- Neck ---------
+        self.neck = 'dilated_encoder'
+        self.neck_dilations = [4, 8, 12, 16]
+        self.neck_expand_ratio = 0.25
+        self.neck_act = 'relu'
+        self.neck_norm = 'GN'
+
+        # --------- Head ---------
+        self.anchor_size  = [[16, 16],
+                             [32, 32],
+                             [64, 64],
+                             [128, 128],
+                             [256, 256],
+                             [512, 512]]
 
-}
+        # --------- Label Assignment ---------
+        self.matcher = 'yolof_matcher'
+        self.matcher_hpy = {'topk_candidates': 8,
+                            'iou_thresh': 0.1,
+                            'ignore_thresh': 0.7,
+                              }
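
Note: a small sanity check of the subclass-override pattern used above, assuming the three YOLOF config classes are importable exactly as defined in this file:

    cfg = Yolof_R50_DC5_1x_Config()
    assert cfg.res5_dilation is True               # overridden for DC5
    assert cfg.out_stride == 16                    # overridden for DC5
    assert cfg.neck_dilations == [4, 8, 12, 16]    # overridden for DC5
    assert cfg.head_dim == 512                     # inherited from YolofBaseConfig
    assert cfg.matcher_hpy['topk_candidates'] == 8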

+ 26 - 23
odlab/datasets/coco.py

@@ -106,7 +106,7 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='COCO-Dataset')
 
     # opt
-    parser.add_argument('--root', default='/Users/liuhaoran/Desktop/python_work/object-detection/dataset/COCO/',
+    parser.add_argument('--root', default='D:/python_work/dataset/COCO/',
                         help='data root')
     parser.add_argument('--is_train', action="store_true", default=False,
                         help='mixup augmentation.')    
@@ -118,45 +118,48 @@ if __name__ == "__main__":
                      np.random.randint(255)) for _ in range(80)]
 
     # config
-    cfg = {
-        # input size
-        'train_min_size': [800],
-        'train_max_size': 1333,
-        'test_min_size': 800,
-        'test_max_size': 1333,
-        'pixel_mean': [0.485, 0.456, 0.406],
-        'pixel_std':  [0.229, 0.224, 0.225],
-        # trans config
-        'detr_style': False,
-        'trans_config': [
-            {'name': 'RandomResize', 'random_sizes': [400, 500, 600, 700, 800], 'max_size': 1333},
-            {'name': 'RandomHFlip'},
-            {'name': 'RandomShift', 'max_shift': 100}
-        ],
-        'box_format': 'xywh',
-        'normalize_coords': False,
-    }
+    class BaseConfig(object):
+        def __init__(self):
+            # --------- Data process ---------
+            ## input size
+            self.train_min_size = [512]   # short edge of image
+            self.train_max_size = 736
+            self.test_min_size  = [512]
+            self.test_max_size  = 736
+            ## Pixel mean & std
+            self.pixel_mean = [0.485, 0.456, 0.406]
+            self.pixel_std  = [0.229, 0.224, 0.225]
+            ## Transforms
+            self.box_format = 'xyxy'
+            self.normalize_coords = False
+            self.detr_style = False
+            self.trans_config = [
+                {'name': 'RandomHFlip'},
+                {'name': 'RandomResize'},
+                {'name': 'RandomShift', 'max_shift': 32},
+            ]
     
+    cfg = BaseConfig()
     # build dataset
     transform = build_transform(cfg, is_train=True)
-    dataset = build_coco(args, transform, is_train=args.is_train)
+    dataset   = build_coco(args, transform, is_train=False)
 
     for index, (image, target) in enumerate(dataset):
         print("{} / {}".format(index, len(dataset)))
         # to numpy
         image = image.permute(1, 2, 0).numpy()
         # denormalize
-        image = (image * cfg['pixel_std'] + cfg['pixel_mean']) * 255
+        image = (image * cfg.pixel_std + cfg.pixel_mean) * 255
         image = image.astype(np.uint8)[..., (2, 1, 0)].copy()
         orig_h, orig_w = image.shape[:2]
 
         tgt_bboxes = target["boxes"]
         tgt_labels = target["labels"]
         for box, label in zip(tgt_bboxes, tgt_labels):
-            if cfg['normalize_coords']:
+            if cfg.normalize_coords:
                 box[..., [0, 2]] *= orig_w
                 box[..., [1, 3]] *= orig_h
-            if cfg['box_format'] == 'xywh':
+            if cfg.box_format == 'xywh':
                 box_x1y1 = box[..., :2] - box[..., 2:] * 0.5
                 box_x2y2 = box[..., :2] + box[..., 2:] * 0.5
                 box = torch.cat([box_x1y1, box_x2y2], dim=-1)

+ 14 - 20
odlab/datasets/transforms.py

@@ -12,12 +12,6 @@ import torchvision.transforms.functional as F
 
 
 # ----------------- Basic transform functions -----------------
-def box_xyxy_to_cxcywh(x):
-    x0, y0, x1, y1 = x.unbind(-1)
-    b = [(x0 + x1) / 2, (y0 + y1) / 2,
-         (x1 - x0), (y1 - y0)]
-    return torch.stack(b, dim=-1)
-
 def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
     return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
 
@@ -311,14 +305,14 @@ def build_transform(cfg=None, is_train=False):
     # ---------------- Transform for Training ----------------
     if is_train:
         transforms = []
-        trans_config = cfg['trans_config']
+        trans_config = cfg.trans_config
         # build transform
-        if not cfg['detr_style']:
+        if not cfg.detr_style:
             for t in trans_config:
                 if t['name'] == 'RandomHFlip':
                     transforms.append(RandomHorizontalFlip())
                 if t['name'] == 'RandomResize':
-                    transforms.append(RandomResize(cfg['train_min_size'], max_size=cfg['train_max_size']))
+                    transforms.append(RandomResize(cfg.train_min_size, max_size=cfg.train_max_size))
                 if t['name'] == 'RandomSizeCrop':
                     transforms.append(RandomSizeCrop(t['min_crop_size'], max_size=t['max_crop_size']))
                 if t['name'] == 'RandomShift':
@@ -327,33 +321,33 @@ def build_transform(cfg=None, is_train=False):
                     transforms.append(RefineBBox(min_box_size=t['min_box_size']))
             transforms.extend([
                 ToTensor(),
-                Normalize(cfg['pixel_mean'], cfg['pixel_std'], cfg['normalize_coords']),
-                ConvertBoxFormat(cfg['box_format'])
+                Normalize(cfg.pixel_mean, cfg.pixel_std, cfg.normalize_coords),
+                ConvertBoxFormat(cfg.box_format)
             ])
         # build transform for DETR-style detector
         else:
             transforms = [
                 RandomHorizontalFlip(),
                 RandomSelect(
-                    RandomResize(cfg['train_min_size'], max_size=cfg['train_max_size']),
+                    RandomResize(cfg.train_min_size, max_size=cfg.train_max_size),
                     Compose([
-                        RandomResize(cfg['train_min_size2']),
-                        RandomSizeCrop(*cfg['random_crop_size']),
-                        RandomResize(cfg['train_min_size'], max_size=cfg['train_max_size']),
+                        RandomResize(cfg.train_min_size2),
+                        RandomSizeCrop(*cfg.random_crop_size),
+                        RandomResize(cfg.train_min_size, max_size=cfg.train_max_size),
                     ])
                 ),
                 ToTensor(),
-                Normalize(cfg['pixel_mean'], cfg['pixel_std'], cfg['normalize_coords']),
-                ConvertBoxFormat(cfg['box_format'])
+                Normalize(cfg.pixel_mean, cfg.pixel_std, cfg.normalize_coords),
+                ConvertBoxFormat(cfg.box_format)
             ]
 
     # ---------------- Transform for Evaluating ----------------
     else:
         transforms = [
-            RandomResize(cfg['test_min_size'], max_size=cfg['test_max_size']),
+            RandomResize(cfg.test_min_size, max_size=cfg.test_max_size),
             ToTensor(),
-            Normalize(cfg['pixel_mean'], cfg['pixel_std'], cfg['normalize_coords']),
-            ConvertBoxFormat(cfg['box_format'])
+            Normalize(cfg.pixel_mean, cfg.pixel_std, cfg.normalize_coords),
+            ConvertBoxFormat(cfg.box_format)
         ]
     
     return Compose(transforms)
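
Note: build_transform now uses attribute access throughout, so any ad-hoc config passed to it must be an object rather than a dict. A minimal sketch with only the attributes this function actually touches (SimpleNamespace is used purely for illustration, and build_transform from this module is assumed to be in scope):

    from types import SimpleNamespace

    cfg = SimpleNamespace(
        detr_style=False,
        trans_config=[{'name': 'RandomHFlip'}, {'name': 'RandomResize'}],
        train_min_size=[800], train_max_size=1333,
        test_min_size=[800],  test_max_size=1333,
        pixel_mean=[0.485, 0.456, 0.406], pixel_std=[0.229, 0.224, 0.225],
        box_format='xyxy', normalize_coords=False,
    )
    train_tf = build_transform(cfg, is_train=True)
    test_tf  = build_transform(cfg, is_train=False)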

+ 5 - 4
odlab/main.py

@@ -107,14 +107,14 @@ def main():
 
     # ---------------------------- Build Dataset ----------------------------
     transforms = build_transform(cfg, is_train=True)
-    dataset = build_dataset(args, cfg, transforms, is_train=True)
+    dataset    = build_dataset(args, cfg, transforms, is_train=True)
 
     # ---------------------------- Build Dataloader ----------------------------
     train_loader = build_dataloader(args, dataset, per_gpu_batch, collate_fn, is_train=True)
 
     # ---------------------------- Build model ----------------------------
     ## Build model
-    model, criterion = build_model(args, cfg, cfg.num_classes, is_val=True)
+    model, criterion = build_model(args, cfg, is_val=True)
     model.to(device)
     model_without_ddp = model
     ## Calcute Params & GFLOPs
@@ -136,6 +136,7 @@ def main():
     optimizer, start_epoch = build_optimizer(cfg, model_without_ddp, args.resume)
 
     # ---------------------------- Build LR Scheduler ----------------------------
+    cfg.warmup_iters = cfg.warmup_iters * cfg.grad_accumulate
     wp_lr_scheduler = build_wp_lr_scheduler(cfg, cfg.base_lr)
     lr_scheduler    = build_lr_scheduler(cfg, optimizer, args.resume)
 
@@ -155,7 +156,7 @@ def main():
     # ----------------------- Training -----------------------
     print("Start training")
     best_map = -1.
-    for epoch in range(start_epoch, cfg['max_epoch']):
+    for epoch in range(start_epoch, cfg.max_epoch):
         if args.distributed:
             train_loader.batch_sampler.sampler.set_epoch(epoch)
 
@@ -178,7 +179,7 @@ def main():
         if distributed_utils.is_main_process():
             model_eval = model_without_ddp
             to_save = False
-            if (epoch % args.eval_epoch) == 0 or (epoch == cfg['max_epoch'] - 1):
+            if (epoch % args.eval_epoch) == 0 or (epoch == cfg.max_epoch - 1):
                 if evaluator is None:
                     to_save = True
                 else:

+ 23 - 124
odlab/models/backbone/resnet.py

@@ -19,13 +19,6 @@ model_urls = {
     'resnet50':  ResNet50_Weights,
     'resnet101': ResNet101_Weights,
 }
-spark_model_urls = {
-    # SparK's IN1K-MAE pretrained weights
-    'spark_resnet18': None,
-    'spark_resnet34': None,
-    'spark_resnet50': "https://github.com/yjh0410/RT-ODLab/releases/download/backbone_weight/resnet50_in1k_spark_pretrained_timm_style.pth",
-    'spark_resnet101': None,
-}
 
 
 # Frozen BatchNormazlizarion
@@ -66,30 +59,21 @@ class FrozenBatchNorm2d(torch.nn.Module):
         bias = b - rm * scale
         return x * scale + bias
 
-
 # -------------------- ResNet series --------------------
 class ResNet(nn.Module):
     """Standard ResNet backbone."""
     def __init__(self,
-                 name               :str  = "resnet50",
-                 res5_dilation      :bool = False,
-                 norm_type          :str  = "BN",
-                 freeze_at          :int  = 0,
-                 pretrained_weights :str  = "imagenet1k_v1"):
+                 name           :str  = "resnet50",
+                 res5_dilation  :bool = False,
+                 norm_type      :str  = "BN",
+                 freeze_at      :int  = 0,
+                 use_pretrained :bool = False):
         super().__init__()
         # Pretrained
-        assert pretrained_weights in [None, "imagenet1k_v1", "imagenet1k_v2"]
-        if pretrained_weights is not None:
-            if name in ('resnet18', 'resnet34'):
-                pretrained_weights = model_urls[name].IMAGENET1K_V1
-            else:
-                if pretrained_weights == "imagenet1k_v1":
-                    pretrained_weights = model_urls[name].IMAGENET1K_V1
-                else:
-                    pretrained_weights = model_urls[name].IMAGENET1K_V2
+        if use_pretrained:
+            pretrained_weights = model_urls[name].IMAGENET1K_V1
         else:
             pretrained_weights = None
-        print('- Backbone pretrained weight: ', pretrained_weights)
 
         # Norm layer
         print("- Norm layer of backbone: {}".format(norm_type))
@@ -135,116 +119,31 @@ class ResNet(nn.Module):
 
         return fmp_list
 
-class SparkResNet(nn.Module):
-    """ResNet backbone with SparK pretrained."""
-    def __init__(self,
-                 name          :str  = "resnet50",
-                 res5_dilation :bool = False,
-                 norm_type     :str  = "BN",
-                 freeze_at     :int  = 0,
-                 pretrained    :bool = False):
-        super().__init__()
-        # Norm layer
-        print("- Norm layer of backbone: {}".format(norm_type))
-        if norm_type == 'BN':
-            norm_layer = nn.BatchNorm2d
-        elif norm_type == 'FrozeBN':
-            norm_layer = FrozenBatchNorm2d
-        else:
-            raise NotImplementedError("Unknown norm type: {}".format(norm_type))
-
-        # Backbone
-        backbone = getattr(torchvision.models, name)(
-            replace_stride_with_dilation=[False, False, res5_dilation], norm_layer=norm_layer)
-        return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
-        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
-        self.feat_dims = [128, 256, 512] if name in ('resnet18', 'resnet34') else [512, 1024, 2048]
-
-        # Load pretrained
-        if pretrained:
-            self.load_pretrained(name)
-
-        # Freeze
-        print("- Freeze at {}".format(freeze_at))
-        if freeze_at >= 0:
-            for name, parameter in backbone.named_parameters():
-                if freeze_at == 0: # Only freeze stem layer
-                    if 'layer1' not in name and 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
-                        parameter.requires_grad_(False)
-                elif freeze_at == 1: # Freeze stem layer + layer1
-                    if 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
-                        parameter.requires_grad_(False)
-                elif freeze_at == 2: # Freeze stem layer + layer1 + layer2
-                    if 'layer3' not in name and 'layer4' not in name:
-                        parameter.requires_grad_(False)
-                elif freeze_at == 3: # Freeze stem layer + layer1 + layer2 + layer3
-                    if 'layer4' not in name:
-                        parameter.requires_grad_(False)
-                else: # Freeze all resnet's layers
-                    parameter.requires_grad_(False)
-
-    def load_pretrained(self, name):
-        url = spark_model_urls["spark_" + name]
-        if url is not None:
-            print('Loading backbone pretrained weight from : {}'.format(url))
-            # checkpoint state dict
-            checkpoint_state_dict = torch.hub.load_state_dict_from_url(
-                url=url, map_location="cpu", check_hash=True)
-            # model state dict
-            model_state_dict = self.body.state_dict()
-            # check
-            for k in list(checkpoint_state_dict.keys()):
-                if k in model_state_dict:
-                    shape_model = tuple(model_state_dict[k].shape)
-                    shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                    if shape_model != shape_checkpoint:
-                        checkpoint_state_dict.pop(k)
-                else:
-                    checkpoint_state_dict.pop(k)
-                    print('Unused key: ', k)
-            # load the weight
-            self.body.load_state_dict(checkpoint_state_dict)
-        else:
-            print('No backbone pretrained for {}.'.format(name))
-
-    def forward(self, x):
-        xs = self.body(x)
-        fmp_list = []
-        for name, fmp in xs.items():
-            fmp_list.append(fmp)
-
-        return fmp_list
-
 
 # build backbone
 def build_resnet(cfg):
     # ResNet series
-    if cfg['pretrained_weight'] in spark_model_urls.keys():
-        backbone = SparkResNet(
-            name           = cfg['backbone'],
-            res5_dilation  = cfg['res5_dilation'],
-            norm_type      = cfg['backbone_norm'],
-            pretrained     = cfg['pretrained'],
-            freeze_at      = cfg['freeze_at'])
-    else:
-        backbone = ResNet(
-            name               = cfg['backbone'],
-            res5_dilation      = cfg['res5_dilation'],
-            norm_type          = cfg['backbone_norm'],
-            pretrained_weights = cfg['pretrained_weight'],
-            freeze_at          = cfg['freeze_at'])
+    backbone = ResNet(
+        name           = cfg.backbone,
+        res5_dilation  = cfg.res5_dilation,
+        norm_type      = cfg.bk_norm,
+        use_pretrained = cfg.use_pretrained,
+        freeze_at      = cfg.freeze_at)
 
     return backbone, backbone.feat_dims
 
 
 if __name__ == '__main__':
-    cfg = {
-        'backbone':      'resnet50',
-        'backbone_norm': 'FrozeBN',
-        'pretrained_weight': 'imagenet1k_v1',
-        'res5_dilation': False,
-        'freeze_at': 0,
-    }
+
+    class FcosBaseConfig(object):
+        def __init__(self):
+            self.backbone = "resnet18"
+            self.bk_norm = "FrozeBN"
+            self.res5_dilation = False
+            self.use_pretrained = True
+            self.freeze_at = 0
+
+    cfg = FcosBaseConfig()
     model, feat_dim = build_resnet(cfg)
     print(feat_dim)
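
Note: a possible extension of the __main__ check above, feeding a dummy image through the backbone to confirm the three feature maps come out with the reported channel counts at strides 8/16/32:

    import torch

    x = torch.randn(1, 3, 224, 224)
    feats = model(x)
    for fmp, dim in zip(feats, feat_dim):
        # expect channels == dim and spatial sizes 28x28, 14x14, 7x7 for a 224x224 input
        print(fmp.shape, dim)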
 

+ 4 - 4
odlab/models/detectors/__init__.py

@@ -6,17 +6,17 @@ from .yolof.build import build_yolof
 from .detr.build  import build_detr
 
 
-def build_model(args, cfg, num_classes=80, is_val=False):
+def build_model(args, cfg, is_val=False):
     # ------------ build object detector ------------
     ## FCOS    
     if 'fcos' in args.model:
-        model, criterion = build_fcos(cfg, num_classes, is_val)
+        model, criterion = build_fcos(cfg, is_val)
     ## YOLOF    
     elif 'yolof' in args.model:
-        model, criterion = build_yolof(cfg, num_classes, is_val)
+        model, criterion = build_yolof(cfg, is_val)
     ## DETR    
     elif 'detr' in args.model:
-        model, criterion = build_detr(cfg, num_classes, is_val)
+        model, criterion = build_detr(cfg, is_val)
     else:
         raise NotImplementedError("Unknown detector: {}".args.model)
     

+ 7 - 7
odlab/models/detectors/fcos/build.py

@@ -1,24 +1,24 @@
 #!/usr/bin/env python3
 # -*- coding:utf-8 -*-
 
-from .criterion import build_criterion
+from .criterion import SetCriterion
 from .fcos import FCOS
 
 
 # build FCOS
-def build_fcos(cfg, num_classes=80, is_val=False):
+def build_fcos(cfg, is_val=False):
     # -------------- Build FCOS --------------
     model = FCOS(cfg         = cfg,
-                 num_classes = num_classes,
-                 conf_thresh = cfg['train_conf_thresh'] if is_val else cfg['test_conf_thresh'],
-                 nms_thresh  = cfg['train_nms_thresh']  if is_val else cfg['test_nms_thresh'],
-                 topk        = cfg['train_topk']        if is_val else cfg['test_topk'],
+                 num_classes = cfg.num_classes,
+                 conf_thresh = cfg.train_conf_thresh if is_val else cfg.test_conf_thresh,
+                 nms_thresh  = cfg.train_nms_thresh  if is_val else cfg.test_nms_thresh,
+                 topk        = cfg.train_topk        if is_val else cfg.test_topk,
                  )
             
     # -------------- Build Criterion --------------
     criterion = None
     if is_val:
         # build criterion for training
-        criterion = build_criterion(cfg, num_classes)
+        criterion = SetCriterion(cfg)
 
     return model, criterion

+ 16 - 22
odlab/models/detectors/fcos/criterion.py

@@ -9,33 +9,33 @@ from utils.distributed_utils import get_world_size, is_dist_avail_and_initialize
 from .matcher import FcosMatcher, SimOtaMatcher
 
 
-class Criterion(nn.Module):
-    def __init__(self, cfg, num_classes=90):
+class SetCriterion(nn.Module):
+    def __init__(self, cfg):
         super().__init__()
         # ------------- Basic parameters -------------
         self.cfg = cfg
-        self.num_classes = num_classes
+        self.num_classes = cfg.num_classes
         # ------------- Focal loss -------------
-        self.alpha = cfg['focal_loss_alpha']
-        self.gamma = cfg['focal_loss_gamma']
+        self.alpha = cfg.focal_loss_alpha
+        self.gamma = cfg.focal_loss_gamma
         # ------------- Loss weight -------------
-        self.weight_dict = {'loss_cls': cfg['loss_cls_weight'],
-                            'loss_reg': cfg['loss_reg_weight'],
-                            'loss_ctn': cfg['loss_ctn_weight']}
+        self.weight_dict = {'loss_cls': cfg.loss_cls_weight,
+                            'loss_reg': cfg.loss_reg_weight,
+                            'loss_ctn': cfg.loss_ctn_weight}
         # ------------- Matcher -------------
-        self.matcher_cfg = cfg['matcher_hpy']
-        if cfg['matcher'] == 'fcos_matcher':
-            self.matcher = FcosMatcher(num_classes,
+        self.matcher_cfg = cfg.matcher_hpy
+        if cfg.matcher == 'fcos_matcher':
+            self.matcher = FcosMatcher(cfg.num_classes,
                                        self.matcher_cfg['center_sampling_radius'],
                                        self.matcher_cfg['object_sizes_of_interest'],
                                        [1., 1., 1., 1.]
                                        )
-        elif cfg['matcher'] == 'simota':
-            self.matcher = SimOtaMatcher(num_classes,
+        elif cfg.matcher == 'simota':
+            self.matcher = SimOtaMatcher(cfg.num_classes,
                                          self.matcher_cfg['soft_center_radius'],
                                          self.matcher_cfg['topk_candidates'])
         else:
-            raise NotImplementedError("Unknown matcher: {}.".format(cfg['matcher']))
+            raise NotImplementedError("Unknown matcher: {}.".format(cfg.matcher))
 
     def loss_labels(self, pred_cls, tgt_cls, num_boxes=1.0):
         """
@@ -249,18 +249,12 @@ class Criterion(nn.Module):
                                  'labels': [...], 
                                  'orig_size': ...}, ...]
         """
-        if self.cfg['matcher'] == "fcos_matcher":
+        if self.cfg.matcher == "fcos_matcher":
             return self.fcos_loss(outputs, targets)
-        elif self.cfg['matcher'] == "simota":
+        elif self.cfg.matcher == "simota":
             return self.ota_loss(outputs, targets)
         else:
             raise NotImplementedError
-            
-
-# build criterion
-def build_criterion(cfg, num_classes=80):
-    criterion = Criterion(cfg=cfg, num_classes=num_classes)
-    return criterion
 
 
 if __name__ == "__main__":

+ 6 - 7
odlab/models/detectors/yolof/build.py

@@ -1,24 +1,23 @@
 #!/usr/bin/env python3
 # -*- coding:utf-8 -*-
 
-from .criterion import build_criterion
+from .criterion import SetCriterion
 from .yolof import YOLOF
 
 
 # build YOLOF
-def build_yolof(cfg, num_classes=80, is_val=False):
+def build_yolof(cfg, is_val=False):
     # -------------- Build YOLOF --------------
     model = YOLOF(cfg         = cfg,
-                  num_classes = num_classes,
-                  conf_thresh = cfg['train_conf_thresh'] if is_val else cfg['test_conf_thresh'],
-                  nms_thresh  = cfg['train_nms_thresh']  if is_val else cfg['test_nms_thresh'],
-                  topk        = cfg['train_topk']        if is_val else cfg['test_topk'],
+                  conf_thresh = cfg.train_conf_thresh if is_val else cfg.test_conf_thresh,
+                  nms_thresh  = cfg.train_nms_thresh  if is_val else cfg.test_nms_thresh,
+                  topk        = cfg.train_topk        if is_val else cfg.test_topk,
                   )
             
     # -------------- Build Criterion --------------
     criterion = None
     if is_val:
         # build criterion for training
-        criterion = build_criterion(cfg, num_classes)
+        criterion = SetCriterion(cfg)
 
     return model, criterion

+ 8 - 13
odlab/models/detectors/yolof/criterion.py

@@ -13,23 +13,23 @@ from utils.distributed_utils import get_world_size, is_dist_avail_and_initialize
 from .matcher import UniformMatcher
 
 
-class Criterion(nn.Module):
+class SetCriterion(nn.Module):
     """
         This code references https://github.com/megvii-model/YOLOF/blob/main/playground/detection/coco/yolof/yolof_base/yolof.py
     """
-    def __init__(self, cfg, num_classes=80):
+    def __init__(self, cfg):
         super().__init__()
         # ------------- Basic parameters -------------
         self.cfg = cfg
-        self.num_classes = num_classes
+        self.num_classes = cfg.num_classes
         # ------------- Focal loss -------------
-        self.alpha = cfg['focal_loss_alpha']
-        self.gamma = cfg['focal_loss_gamma']
+        self.alpha = cfg.focal_loss_alpha
+        self.gamma = cfg.focal_loss_gamma
         # ------------- Loss weight -------------
-        self.weight_dict = {'loss_cls': cfg['loss_cls_weight'],
-                            'loss_reg': cfg['loss_reg_weight']}
+        self.weight_dict = {'loss_cls': cfg.loss_cls_weight,
+                            'loss_reg': cfg.loss_reg_weight}
         # ------------- Matcher -------------
-        self.matcher_cfg = cfg['matcher_hpy']
+        self.matcher_cfg = cfg.matcher_hpy
         self.matcher = UniformMatcher(self.matcher_cfg['topk_candidates'])
 
     def loss_labels(self, pred_cls, tgt_cls, num_boxes):
@@ -142,10 +142,5 @@ class Criterion(nn.Module):
         return loss_dict
 
 
-def build_criterion(cfg, num_classes=80):
-    criterion = Criterion(cfg=cfg, num_classes=num_classes)
-    return criterion
-
-    
 if __name__ == "__main__":
     pass

+ 10 - 12
odlab/utils/lr_scheduler.py

@@ -9,14 +9,12 @@ class LinearWarmUpScheduler(object):
         self.wp_iter = wp_iter
         self.warmup_factor = warmup_factor
 
-
     def set_lr(self, optimizer, lr):
         for param_group in optimizer.param_groups:
             init_lr = param_group['initial_lr']
             ratio = init_lr / self.base_lr
             param_group['lr'] = lr * ratio
 
-
     def __call__(self, iter, optimizer):
         # warmup
         alpha = iter / self.wp_iter
@@ -27,13 +25,13 @@ class LinearWarmUpScheduler(object):
 ## Build WP LR Scheduler
 def build_wp_lr_scheduler(cfg, base_lr=0.01):
     print('==============================')
-    print('WarmUpScheduler: {}'.format(cfg['warmup']))
+    print('WarmUpScheduler: {}'.format(cfg.warmup))
     print('--base_lr: {}'.format(base_lr))
-    print('--warmup_iters: {}'.format(cfg['warmup_iters']))
-    print('--warmup_factor: {}'.format(cfg['warmup_factor']))
+    print('--warmup_iters: {}'.format(cfg.warmup_iters))
+    print('--warmup_factor: {}'.format(cfg.warmup_factor))
 
-    if cfg['warmup'] == 'linear':
-        wp_lr_scheduler = LinearWarmUpScheduler(base_lr, cfg['warmup_iters'], cfg['warmup_factor'])
+    if cfg.warmup == 'linear':
+        wp_lr_scheduler = LinearWarmUpScheduler(base_lr, cfg.warmup_iters, cfg.warmup_factor)
     
     return wp_lr_scheduler
 
@@ -41,13 +39,13 @@ def build_wp_lr_scheduler(cfg, base_lr=0.01):
 # ------------------------- LR Scheduler -------------------------
 def build_lr_scheduler(cfg, optimizer, resume=None):
     print('==============================')
-    print('LR Scheduler: {}'.format(cfg['lr_scheduler']))
+    print('LR Scheduler: {}'.format(cfg.lr_scheduler))
 
-    if cfg['lr_scheduler'] == 'step':
+    if cfg.lr_scheduler == 'step':
         assert hasattr(cfg, 'lr_epoch')
-        print('--lr_epoch: {}'.format(cfg['lr_epoch']))
-        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=cfg['lr_epoch'])
-    elif cfg['lr_scheduler'] == 'cosine':
+        print('--lr_epoch: {}'.format(cfg.lr_epoch))
+        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=cfg.lr_epoch)
+    elif cfg.lr_scheduler == 'cosine':
         pass
         
     if resume is not None:
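
The scheduler builders pair a linear warmup over `cfg.warmup_iters` (starting from `cfg.warmup_factor`) with a `MultiStepLR` decay at `cfg.lr_epoch`. Below is a minimal standalone sketch of that pattern, assuming the usual linear ramp and a toy model/optimizer; the repo applies a per-parameter-group ratio inside `LinearWarmUpScheduler.set_lr`, which the sketch omits, and the milestone value is illustrative only.

```python
# Hedged sketch of linear warmup + step decay; toy model/optimizer are illustrative only.
import torch

model = torch.nn.Linear(8, 2)
base_lr = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=base_lr)

# Step decay at the epoch milestones (cfg.lr_epoch), as in build_lr_scheduler().
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[11])

# Linear warmup over the first warmup_iters iterations, as in build_wp_lr_scheduler().
warmup_iters, warmup_factor = 1000, 0.00066667
for it in range(warmup_iters):
    alpha = it / warmup_iters
    factor = warmup_factor * (1.0 - alpha) + alpha   # ramps from warmup_factor to 1.0
    for param_group in optimizer.param_groups:       # simplified: no per-group ratio
        param_group['lr'] = base_lr * factor
    # forward/backward and optimizer.step() would go here

# After warmup, step the epoch scheduler once per epoch:
# lr_scheduler.step()
```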

+ 0 - 3
yolo/config/__init__.py

@@ -5,7 +5,6 @@ from .yolov3_config    import build_yolov3_config
 from .yolov5_config    import build_yolov5_config
 from .yolov5_af_config import build_yolov5af_config
 from .yolov6_config    import build_yolov6_config
-from .yolov7_af_config import build_yolov7af_config
 from .yolov8_config    import build_yolov8_config
 from .gelan_config     import build_gelan_config
 from .rtdetr_config    import build_rtdetr_config
@@ -26,8 +25,6 @@ def build_config(args):
         cfg = build_yolov5_config(args)
     elif 'yolov6' in args.model:
         cfg = build_yolov6_config(args)
-    elif 'yolov7_af' in args.model:
-        cfg = build_yolov7af_config(args)
     elif 'yolov8' in args.model:
         cfg = build_yolov8_config(args)
     elif 'gelan' in args.model:

+ 0 - 152
yolo/config/yolov7_af_config.py

@@ -1,152 +0,0 @@
-# yolo Config
-
-
-def build_yolov7af_config(args):
-    if   args.model == 'yolov7_af_s':
-        return Yolov7AFSConfig()
-    elif args.model == 'yolov7_af_l':
-        return Yolov7AFLConfig()
-    else:
-        raise NotImplementedError("No config for model: {}".format(args.model))
-    
-# YOLOv7AF-Base config
-class Yolov7AFBaseConfig(object):
-    def __init__(self) -> None:
-        # ---------------- Model config ----------------
-        self.width    = 1.0
-        self.reg_max  = 16
-        self.out_stride = [8, 16, 32]
-        self.max_stride = 32
-        self.num_levels = 3
-        self.scale      = "b"
-        ## Backbone
-        self.bk_act   = 'silu'
-        self.bk_norm  = 'BN'
-        self.bk_depthwise = False
-        self.use_pretrained = False
-        ## Neck
-        self.neck_act       = 'silu'
-        self.neck_norm      = 'BN'
-        self.neck_depthwise = False
-        self.neck_expand_ratio = 0.5
-        self.spp_pooling_size  = 5
-        ## FPN
-        self.fpn_act  = 'silu'
-        self.fpn_norm = 'BN'
-        self.fpn_depthwise = False
-        self.fpn_expansions = [0.5, 0.5]
-        self.fpn_block_bw = 4
-        self.fpn_block_dw = 1
-        ## Head
-        self.head_act  = 'silu'
-        self.head_norm = 'BN'
-        self.head_depthwise = False
-        self.head_dim       = 256
-        self.num_cls_head   = 2
-        self.num_reg_head   = 2
-
-        # ---------------- Post-process config ----------------
-        ## Post process
-        self.val_topk = 1000
-        self.val_conf_thresh = 0.001
-        self.val_nms_thresh  = 0.7
-        self.test_topk = 100
-        self.test_conf_thresh = 0.2
-        self.test_nms_thresh  = 0.5
-
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        self.tal_topk_candidates = 10
-        self.tal_alpha = 0.5
-        self.tal_beta  = 6.0
-        ## Loss weight
-        self.loss_cls = 0.5
-        self.loss_box = 7.5
-        self.loss_dfl = 1.5
-
-        # ---------------- ModelEMA config ----------------
-        self.use_ema = True
-        self.ema_decay = 0.9998
-        self.ema_tau   = 2000
-
-        # ---------------- Optimizer config ----------------
-        self.trainer      = 'yolo'
-        self.optimizer    = 'adamw'
-        self.per_image_lr = 0.001 / 64
-        self.base_lr      = None      # base_lr = per_image_lr * batch_size
-        self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
-        self.momentum     = 0.9
-        self.weight_decay = 0.05
-        self.clip_max_norm   = -1.
-        self.warmup_bias_lr  = 0.1
-        self.warmup_momentum = 0.8
-
-        # ---------------- Lr Scheduler config ----------------
-        self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
-        self.max_epoch    = 500
-        self.eval_epoch   = 10
-        self.no_aug_epoch = 20
-
-        # ---------------- Data process config ----------------
-        self.aug_type = 'yolo'
-        self.box_format = 'xyxy'
-        self.normalize_coords = False
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.0           # approximated by the YOLOX's mixup
-        self.multi_scale = [0.5, 1.25]   # multi scale: [img_size * 0.5, img_size * 1.25]
-        ## Pixel mean & std
-        self.pixel_mean = [0., 0., 0.]
-        self.pixel_std  = [255., 255., 255.]
-        ## Transforms
-        self.train_img_size = 640
-        self.test_img_size  = 640
-        self.use_ablu = True
-        self.affine_params = {
-            'degrees': 0.0,
-            'translate': 0.2,
-            'scale': [0.1, 2.0],
-            'shear': 0.0,
-            'perspective': 0.0,
-            'hsv_h': 0.015,
-            'hsv_s': 0.7,
-            'hsv_v': 0.4,
-        }
-
-    def print_config(self):
-        config_dict = {key: value for key, value in self.__dict__.items() if not key.startswith('__')}
-        for k, v in config_dict.items():
-            print("{} : {}".format(k, v))
-
-# YOLOv7-S
-class Yolov7AFSConfig(Yolov7AFBaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.width = 0.50
-        self.scale = "s"
-        self.fpn_expansions = [0.5, 1.0]
-        self.fpn_block_bw = 2
-        self.fpn_block_dw = 1
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.5
-
-# YOLOv7-L
-class Yolov7AFLConfig(Yolov7AFBaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.width = 1.0
-        self.scale = "l"
-        self.fpn_expansions = [0.5, 0.5]
-        self.fpn_block_bw = 4
-        self.fpn_block_dw = 1
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.1
-        self.copy_paste  = 0.5

+ 1 - 5
yolo/models/__init__.py

@@ -8,7 +8,6 @@ from .yolov3.build    import build_yolov3
 from .yolov5.build    import build_yolov5
 from .yolov5_af.build import build_yolov5af
 from .yolov6.build    import build_yolov6
-from .yolov7_af.build import build_yolov7af
 from .yolov8.build    import build_yolov8
 from .gelan.build     import build_gelan
 from .rtdetr.build    import build_rtdetr
@@ -25,7 +24,7 @@ def build_model(args, cfg, is_val=False):
     ## Modified YOLOv3
     elif 'yolov3' in args.model:
         model, criterion = build_yolov3(cfg, is_val)
-    ## YOLOX
+    ## Anchor-free YOLOv5
     elif 'yolov5_af' in args.model:
         model, criterion = build_yolov5af(cfg, is_val)
     ## Modified YOLOv5
@@ -34,9 +33,6 @@ def build_model(args, cfg, is_val=False):
     ## YOLOv6
     elif 'yolov6' in args.model:
         model, criterion = build_yolov6(cfg, is_val)
-    ## Modified Anchor-free YOLOv7
-    elif 'yolov7_af' in args.model:
-        model, criterion = build_yolov7af(cfg, is_val)
     ## YOLOv8
     elif 'yolov8' in args.model:
         model, criterion = build_yolov8(cfg, is_val)

+ 0 - 1
yolo/models/rtdetr/README.md

@@ -5,7 +5,6 @@
 |--------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|------|
 | RT-DETR-R18  | 4xb4  |  640  |           45.5         |        63.5       |        66.8       |        21.0        | [ckpt](https://github.com/yjh0410/ODLab-World/releases/download/coco_weight/rtdetr_r18_coco.pth) | [log](https://github.com/yjh0410/ODLab-World/releases/download/coco_weight/RT-DETR-R18-COCO.txt)|
 | RT-DETR-R50  | 4xb4  |  640  |           50.6         |        69.4       |       112.1       |        36.7        | [ckpt](https://github.com/yjh0410/ODLab-World/releases/download/coco_weight/rtdetr_r50_coco.pth) | [log](https://github.com/yjh0410/ODLab-World/releases/download/coco_weight/RT-DETR-R50-COCO.txt)|
-| RT-DETR-R101 | 4xb4  |  640  |                        |                   |                   |                    |  | |
 
 
 ## Train RT-DETR

+ 1 - 1
yolo/models/yolov6/README.md

@@ -4,7 +4,7 @@
 
 |   Model  | Batch | Scale | AP<sup>val<br>0.5 | Weight |  Logs  |
 |----------|-------|-------|-------------------|--------|--------|
-| YOLOv6-S | 1xb16 |  640  |       79.0        | [ckpt](https://github.com/yjh0410/YOLO-Tutorial-v6/releases/download/yolo_tutorial_ckpt/yolov6_s_voc.pth) | [log](https://github.com/yjh0410/YOLO-Tutorial-v6/releases/download/yolo_tutorial_ckpt/YOLOv6-S-VOC.txt) |
+| YOLOv6-S | 1xb16 |  640  |               | [ckpt](https://github.com/yjh0410/YOLO-Tutorial-v6/releases/download/yolo_tutorial_ckpt/yolov6_s_voc.pth) | [log](https://github.com/yjh0410/YOLO-Tutorial-v6/releases/download/yolo_tutorial_ckpt/YOLOv6-S-VOC.txt) |
 
 - COCO
 

+ 0 - 61
yolo/models/yolov7_af/README.md

@@ -1,61 +0,0 @@
-# Anchor-free YOLOv7:
-
-- VOC
-
-|     Model   | Batch | Scale | AP<sup>val<br>0.5 | Weight |  Logs  |
-|-------------|-------|-------|-------------------|--------|--------|
-| YOLOv7-AF-S | 1xb16 |  640  |       82.7        | [ckpt](https://github.com/yjh0410/YOLO-Tutorial-v7/releases/download/yolo_tutorial_ckpt/yolov7_af_s_voc.pth) | [log](https://github.com/yjh0410/YOLO-Tutorial-v7/releases/download/yolo_tutorial_ckpt/YOLOv7-AF-S-VOC.txt) |
-
-- COCO
-
-|    Model    | Batch | Scale | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |  Logs  |
-|-------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|--------|
-| YOLOv7-AF-S | 1xb16 |  640  |                    |               |   26.9            |   8.9             |  |  |
-
-- For training, we train redesigned YOLOv7-AF with 500 epochs on COCO. We also use the gradient accumulation.
-- For data augmentation, we use the RandomAffine, RandomHSV, Mosaic and YOLOX's Mixup augmentation.
-- For optimizer, we use AdamW with weight decay of 0.05 and per image base lr of 0.001 / 64.
-- For learning rate scheduler, we use cosine decay scheduler.
-- For batch size, we set it to 16, and we also use the gradient accumulation to approximate batch size of 256.
-
-
-## Train YOLOv7-AF
-### Single GPU
-Taking training YOLOv7-AF-S on COCO as the example,
-```Shell
-python train.py --cuda -d coco --root path/to/coco -m yolov7_af_s -bs 16 --fp16 
-```
-
-### Multi GPU
-Taking training YOLOv7-AF-S on COCO as the example,
-```Shell
-python -m torch.distributed.run --nproc_per_node=8 train.py --cuda --distributed -d coco --root path/to/coco -m yolov7_af_s -bs 16 --fp16 
-```
-
-## Test YOLOv7-AF
-Taking testing YOLOv7-AF-S on COCO-val as the example,
-```Shell
-python test.py --cuda -d coco --root path/to/coco -m yolov7_af_s --weight path/to/yolov7.pth --show 
-```
-
-## Evaluate YOLOv7-AF
-Taking evaluating YOLOv7-AF-S on COCO-val as the example,
-```Shell
-python eval.py --cuda -d coco --root path/to/coco -m yolov7_af_s --weight path/to/yolov7.pth 
-```
-
-## Demo
-### Detect with Image
-```Shell
-python demo.py --mode image --path_to_img path/to/image_dirs/ --cuda -m yolov7_af_s --weight path/to/weight --show
-```
-
-### Detect with Video
-```Shell
-python demo.py --mode video --path_to_vid path/to/video --cuda -m yolov7_af_s --weight path/to/weight --show --gif
-```
-
-### Detect with Camera
-```Shell
-python demo.py --mode camera --cuda -m yolov7_af_s --weight path/to/weight --show --gif
-```

+ 0 - 24
yolo/models/yolov7_af/build.py

@@ -1,24 +0,0 @@
-import torch.nn as nn
-
-from .loss import SetCriterion
-from .yolov7_af import Yolov7AF
-
-
-# build object detector
-def build_yolov7af(cfg, is_val=False):
-    # -------------- Build YOLO --------------
-    model = Yolov7AF(cfg, is_val)
-
-    # -------------- Initialize YOLO --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if is_val:
-        # build criterion for training
-        criterion = SetCriterion(cfg)
-        
-    return model, criterion

+ 0 - 187
yolo/models/yolov7_af/loss.py

@@ -1,187 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from utils.box_ops import bbox2dist, bbox_iou
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-from .matcher import TaskAlignedAssigner
-
-
-class SetCriterion(object):
-    def __init__(self, cfg):
-        # --------------- Basic parameters ---------------
-        self.cfg = cfg
-        self.reg_max = cfg.reg_max
-        self.num_classes = cfg.num_classes
-        # --------------- Loss config ---------------
-        self.loss_cls_weight = cfg.loss_cls
-        self.loss_box_weight = cfg.loss_box
-        self.loss_dfl_weight = cfg.loss_dfl
-        # --------------- Matcher config ---------------
-        self.matcher = TaskAlignedAssigner(num_classes     = cfg.num_classes,
-                                           topk_candidates = cfg.tal_topk_candidates,
-                                           alpha           = cfg.tal_alpha,
-                                           beta            = cfg.tal_beta
-                                           )
-
-    def loss_classes(self, pred_cls, gt_score):
-        # compute bce loss
-        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_score, reduction='none')
-
-        return loss_cls
-    
-    def loss_bboxes(self, pred_box, gt_box, bbox_weight):
-        # regression loss
-        ious = bbox_iou(pred_box, gt_box, xywh=False, CIoU=True)
-        loss_box = (1.0 - ious.squeeze(-1)) * bbox_weight
-
-        return loss_box
-    
-    def loss_dfl(self, pred_reg, gt_box, anchor, stride, bbox_weight=None):
-        # rescale coords by stride
-        gt_box_s = gt_box / stride
-        anchor_s = anchor / stride
-
-        # compute deltas
-        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.reg_max - 1)
-
-        gt_left = gt_ltrb_s.to(torch.long)
-        gt_right = gt_left + 1
-
-        weight_left = gt_right.to(torch.float) - gt_ltrb_s
-        weight_right = 1 - weight_left
-
-        # loss left
-        loss_left = F.cross_entropy(
-            pred_reg.view(-1, self.reg_max),
-            gt_left.view(-1),
-            reduction='none').view(gt_left.shape) * weight_left
-        # loss right
-        loss_right = F.cross_entropy(
-            pred_reg.view(-1, self.reg_max),
-            gt_right.view(-1),
-            reduction='none').view(gt_left.shape) * weight_right
-
-        loss_dfl = (loss_left + loss_right).mean(-1)
-        
-        if bbox_weight is not None:
-            loss_dfl *= bbox_weight
-
-        return loss_dfl
-
-    def __call__(self, outputs, targets):        
-        """
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_reg']: List(Tensor) [B, M, 4*(reg_max+1)]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['anchors']: List(Tensor) [M, 2]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            outputs['stride_tensor']: List(Tensor) [M, 1]
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        # preds: [B, M, C]
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-        bs, num_anchors = cls_preds.shape[:2]
-        device = cls_preds.device
-        anchors = torch.cat(outputs['anchors'], dim=0)
-        
-        # --------------- label assignment ---------------
-        gt_score_targets = []
-        gt_bbox_targets = []
-        fg_masks = []
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)     # [Mp,]
-            tgt_boxs = targets[batch_idx]["boxes"].to(device)        # [Mp, 4]
-
-            if self.cfg.normalize_coords:
-                img_h, img_w = outputs['image_size']
-                tgt_boxs[..., [0, 2]] *= img_w
-                tgt_boxs[..., [1, 3]] *= img_h
-            
-            if self.cfg.box_format == 'xywh':
-                tgt_boxs_x1y1 = tgt_boxs[..., :2] - 0.5 * tgt_boxs[..., 2:]
-                tgt_boxs_x2y2 = tgt_boxs[..., :2] + 0.5 * tgt_boxs[..., 2:]
-                tgt_boxs = torch.cat([tgt_boxs_x1y1, tgt_boxs_x2y2], dim=-1)
-
-            # check target
-            if len(tgt_labels) == 0 or tgt_boxs.max().item() == 0.:
-                # There is no valid gt
-                fg_mask  = cls_preds.new_zeros(1, num_anchors).bool()               #[1, M,]
-                gt_score = cls_preds.new_zeros((1, num_anchors, self.num_classes)) #[1, M, C]
-                gt_box   = cls_preds.new_zeros((1, num_anchors, 4))                  #[1, M, 4]
-            else:
-                tgt_labels = tgt_labels[None, :, None]      # [1, Mp, 1]
-                tgt_boxs = tgt_boxs[None]                   # [1, Mp, 4]
-                (
-                    _,
-                    gt_box,     # [1, M, 4]
-                    gt_score,   # [1, M, C]
-                    fg_mask,    # [1, M,]
-                    _
-                ) = self.matcher(
-                    pd_scores = cls_preds[batch_idx:batch_idx+1].detach().sigmoid(), 
-                    pd_bboxes = box_preds[batch_idx:batch_idx+1].detach(),
-                    anc_points = anchors,
-                    gt_labels = tgt_labels,
-                    gt_bboxes = tgt_boxs
-                    )
-            gt_score_targets.append(gt_score)
-            gt_bbox_targets.append(gt_box)
-            fg_masks.append(fg_mask)
-
-        # List[B, 1, M, C] -> Tensor[B, M, C] -> Tensor[BM, C]
-        fg_masks = torch.cat(fg_masks, 0).view(-1)                                    # [BM,]
-        gt_score_targets = torch.cat(gt_score_targets, 0).view(-1, self.num_classes)  # [BM, C]
-        gt_bbox_targets = torch.cat(gt_bbox_targets, 0).view(-1, 4)                   # [BM, 4]
-        num_fgs = gt_score_targets.sum()
-        
-        # Average loss normalizer across all the GPUs
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # ------------------ Classification loss ------------------
-        cls_preds = cls_preds.view(-1, self.num_classes)
-        loss_cls = self.loss_classes(cls_preds, gt_score_targets)
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ Regression loss ------------------
-        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
-        box_targets_pos = gt_bbox_targets.view(-1, 4)[fg_masks]
-        bbox_weight = gt_score_targets[fg_masks].sum(-1)
-        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos, bbox_weight)
-        loss_box = loss_box.sum() / num_fgs
-
-        # ------------------ Distribution focal loss  ------------------
-        ## process anchors
-        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
-        ## process stride tensors
-        strides = torch.cat(outputs['stride_tensor'], dim=0)
-        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
-        ## fg preds
-        reg_preds_pos = reg_preds.view(-1, 4*self.reg_max)[fg_masks]
-        anchors_pos = anchors[fg_masks]
-        strides_pos = strides[fg_masks]
-        ## compute dfl
-        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets_pos, anchors_pos, strides_pos, bbox_weight)
-        loss_dfl = loss_dfl.sum() / num_fgs
-
-        # total loss
-        losses = loss_cls * self.loss_cls_weight + loss_box * self.loss_box_weight + loss_dfl * self.loss_dfl_weight
-        loss_dict = dict(
-                loss_cls = loss_cls,
-                loss_box = loss_box,
-                loss_dfl = loss_dfl,
-                losses = losses
-        )
-
-        return loss_dict
-    
-
-if __name__ == "__main__":
-    pass

+ 0 - 199
yolo/models/yolov7_af/matcher.py

@@ -1,199 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from utils.box_ops import bbox_iou
-
-
-# -------------------------- Task Aligned Assigner --------------------------
-class TaskAlignedAssigner(nn.Module):
-    def __init__(self,
-                 num_classes     = 80,
-                 topk_candidates = 10,
-                 alpha           = 0.5,
-                 beta            = 6.0, 
-                 eps             = 1e-9):
-        super(TaskAlignedAssigner, self).__init__()
-        self.topk_candidates = topk_candidates
-        self.num_classes = num_classes
-        self.bg_idx = num_classes
-        self.alpha = alpha
-        self.beta = beta
-        self.eps = eps
-
-    @torch.no_grad()
-    def forward(self,
-                pd_scores,
-                pd_bboxes,
-                anc_points,
-                gt_labels,
-                gt_bboxes):
-        self.bs = pd_scores.size(0)
-        self.n_max_boxes = gt_bboxes.size(1)
-
-        mask_pos, align_metric, overlaps = self.get_pos_mask(
-            pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points)
-
-        target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(
-            mask_pos, overlaps, self.n_max_boxes)
-
-        # Assigned target
-        target_labels, target_bboxes, target_scores = self.get_targets(
-            gt_labels, gt_bboxes, target_gt_idx, fg_mask)
-
-        # normalize
-        align_metric *= mask_pos
-        pos_align_metrics = align_metric.amax(axis=-1, keepdim=True)  # b, max_num_obj
-        pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True)  # b, max_num_obj
-        norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1)
-        target_scores = target_scores * norm_align_metric
-
-        return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx
-
-    def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points):
-        # get in_gts mask, (b, max_num_obj, h*w)
-        mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
-        # get anchor_align metric, (b, max_num_obj, h*w)
-        align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts)
-        # get topk_metric mask, (b, max_num_obj, h*w)
-        mask_topk = self.select_topk_candidates(align_metric)
-        # merge all mask to a final mask, (b, max_num_obj, h*w)
-        mask_pos = mask_topk * mask_in_gts
-
-        return mask_pos, align_metric, overlaps
-
-    def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts):
-        """Compute alignment metric given predicted and ground truth bounding boxes."""
-        na = pd_bboxes.shape[-2]
-        mask_in_gts = mask_in_gts.bool()  # b, max_num_obj, h*w
-        overlaps = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_bboxes.dtype, device=pd_bboxes.device)
-        bbox_scores = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_scores.dtype, device=pd_scores.device)
-
-        ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long)  # 2, b, max_num_obj
-        ind[0] = torch.arange(end=self.bs).view(-1, 1).expand(-1, self.n_max_boxes)  # b, max_num_obj
-        ind[1] = gt_labels.squeeze(-1)  # b, max_num_obj
-        # Get the scores of each grid for each gt cls
-        bbox_scores[mask_in_gts] = pd_scores[ind[0], :, ind[1]][mask_in_gts]  # b, max_num_obj, h*w
-
-        # (b, max_num_obj, 1, 4), (b, 1, h*w, 4)
-        pd_boxes = pd_bboxes.unsqueeze(1).expand(-1, self.n_max_boxes, -1, -1)[mask_in_gts]
-        gt_boxes = gt_bboxes.unsqueeze(2).expand(-1, -1, na, -1)[mask_in_gts]
-        overlaps[mask_in_gts] = bbox_iou(gt_boxes, pd_boxes, xywh=False, CIoU=True).squeeze(-1).clamp_(0)
-
-        align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)
-        return align_metric, overlaps
-
-    def select_topk_candidates(self, metrics, largest=True):
-        """
-        Args:
-            metrics: (b, max_num_obj, h*w).
-            topk_mask: (b, max_num_obj, topk) or None
-        """
-        # (b, max_num_obj, topk)
-        topk_metrics, topk_idxs = torch.topk(metrics, self.topk_candidates, dim=-1, largest=largest)
-        topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).expand_as(topk_idxs)
-        # (b, max_num_obj, topk)
-        topk_idxs.masked_fill_(~topk_mask, 0)
-
-        # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w)
-        count_tensor = torch.zeros(metrics.shape, dtype=torch.int8, device=topk_idxs.device)
-        ones = torch.ones_like(topk_idxs[:, :, :1], dtype=torch.int8, device=topk_idxs.device)
-        for k in range(self.topk_candidates):
-            # Expand topk_idxs for each value of k and add 1 at the specified positions
-            count_tensor.scatter_add_(-1, topk_idxs[:, :, k:k + 1], ones)
-        # count_tensor.scatter_add_(-1, topk_idxs, torch.ones_like(topk_idxs, dtype=torch.int8, device=topk_idxs.device))
-        # Filter invalid bboxes
-        count_tensor.masked_fill_(count_tensor > 1, 0)
-
-        return count_tensor.to(metrics.dtype)
-
-    def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
-        # Assigned target labels, (b, 1)
-        batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
-        target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes  # (b, h*w)
-        target_labels = gt_labels.long().flatten()[target_gt_idx]  # (b, h*w)
-
-        # Assigned target boxes, (b, max_num_obj, 4) -> (b, h*w, 4)
-        target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx]
-
-        # Assigned target scores
-        target_labels.clamp_(0)
-
-        # 10x faster than F.one_hot()
-        target_scores = torch.zeros((target_labels.shape[0], target_labels.shape[1], self.num_classes),
-                                    dtype=torch.int64,
-                                    device=target_labels.device)  # (b, h*w, 80)
-        target_scores.scatter_(2, target_labels.unsqueeze(-1), 1)
-
-        fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes)  # (b, h*w, 80)
-        target_scores = torch.where(fg_scores_mask > 0, target_scores, 0)
-
-        return target_labels, target_bboxes, target_scores
-    
-
-# -------------------------- Basic Functions --------------------------
-def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
-    """select the positive anchors's center in gt
-    Args:
-        xy_centers (Tensor): shape(bs*n_max_boxes, num_total_anchors, 4)
-        gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    n_anchors = xy_centers.size(0)
-    bs, n_max_boxes, _ = gt_bboxes.size()
-    _gt_bboxes = gt_bboxes.reshape([-1, 4])
-    xy_centers = xy_centers.unsqueeze(0).repeat(bs * n_max_boxes, 1, 1)
-    gt_bboxes_lt = _gt_bboxes[:, 0:2].unsqueeze(1).repeat(1, n_anchors, 1)
-    gt_bboxes_rb = _gt_bboxes[:, 2:4].unsqueeze(1).repeat(1, n_anchors, 1)
-    b_lt = xy_centers - gt_bboxes_lt
-    b_rb = gt_bboxes_rb - xy_centers
-    bbox_deltas = torch.cat([b_lt, b_rb], dim=-1)
-    bbox_deltas = bbox_deltas.reshape([bs, n_max_boxes, n_anchors, -1])
-    return (bbox_deltas.min(axis=-1)[0] > eps).to(gt_bboxes.dtype)
-
-def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
-    """if an anchor box is assigned to multiple gts,
-        the one with the highest iou will be selected.
-    Args:
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-        overlaps (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    Return:
-        target_gt_idx (Tensor): shape(bs, num_total_anchors)
-        fg_mask (Tensor): shape(bs, num_total_anchors)
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    fg_mask = mask_pos.sum(-2)
-    if fg_mask.max() > 1:  # one anchor is assigned to multiple gt_bboxes
-        mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1)  # (b, n_max_boxes, h*w)
-        max_overlaps_idx = overlaps.argmax(1)  # (b, h*w)
-
-        is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device)
-        is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1)
-
-        mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float()  # (b, n_max_boxes, h*w)
-        fg_mask = mask_pos.sum(-2)
-    # Find each grid serve which gt(index)
-    target_gt_idx = mask_pos.argmax(-2)  # (b, h*w)
-
-    return target_gt_idx, fg_mask, mask_pos
-
-def iou_calculator(box1, box2, eps=1e-9):
-    """Calculate iou for batch
-    Args:
-        box1 (Tensor): shape(bs, n_max_boxes, 1, 4)
-        box2 (Tensor): shape(bs, 1, num_total_anchors, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    box1 = box1.unsqueeze(2)  # [N, M1, 4] -> [N, M1, 1, 4]
-    box2 = box2.unsqueeze(1)  # [N, M2, 4] -> [N, 1, M2, 4]
-    px1y1, px2y2 = box1[:, :, :, 0:2], box1[:, :, :, 2:4]
-    gx1y1, gx2y2 = box2[:, :, :, 0:2], box2[:, :, :, 2:4]
-    x1y1 = torch.maximum(px1y1, gx1y1)
-    x2y2 = torch.minimum(px2y2, gx2y2)
-    overlap = (x2y2 - x1y1).clip(0).prod(-1)
-    area1 = (px2y2 - px1y1).clip(0).prod(-1)
-    area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
-    union = area1 + area2 - overlap + eps
-
-    return overlap / union

+ 0 - 152
yolo/models/yolov7_af/yolov7_af.py

@@ -1,152 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-from .yolov7_af_backbone import Yolov7Backbone
-from .yolov7_af_neck     import SPPFBlockCSP
-from .yolov7_af_pafpn    import Yolov7PaFPN
-from .yolov7_af_head     import Yolov7DetHead
-from .yolov7_af_pred     import Yolov7AFDetPredLayer
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# Anchor-free YOLOv7
-class Yolov7AF(nn.Module):
-    def __init__(self,
-                 cfg,
-                 is_val = False,
-                 ) -> None:
-        super(Yolov7AF, self).__init__()
-        # ---------------------- Basic setting ----------------------
-        self.cfg = cfg
-        self.num_classes = cfg.num_classes
-        ## Post-process parameters
-        self.topk_candidates  = cfg.val_topk        if is_val else cfg.test_topk
-        self.conf_thresh      = cfg.val_conf_thresh if is_val else cfg.test_conf_thresh
-        self.nms_thresh       = cfg.val_nms_thresh  if is_val else cfg.test_nms_thresh
-        self.no_multi_labels  = False if is_val else True
-        
-        # ---------------------- Network Parameters ----------------------
-        ## Backbone
-        self.backbone = Yolov7Backbone(cfg)
-        self.pyramid_feat_dims = self.backbone.feat_dims[-3:]
-        ## Neck
-        self.neck     = SPPFBlockCSP(cfg, self.pyramid_feat_dims[-1], self.pyramid_feat_dims[-1]//2)
-        self.pyramid_feat_dims[-1] = self.neck.out_dim
-        ## Neck: PaFPN
-        self.fpn      = Yolov7PaFPN(cfg, self.pyramid_feat_dims)
-        ## Head
-        self.head     = Yolov7DetHead(cfg, self.fpn.out_dims)
-        ## Pred
-        self.pred     = Yolov7AFDetPredLayer(cfg, self.head.cls_head_dim, self.head.reg_head_dim)
-
-    def post_process(self, cls_preds, box_preds):
-        """
-        We process predictions at each scale hierarchically
-        Input:
-            cls_preds: List[torch.Tensor] -> [[B, M, C], ...], B=1
-            box_preds: List[torch.Tensor] -> [[B, M, 4], ...], B=1
-        Output:
-            bboxes: np.array -> [N, 4]
-            scores: np.array -> [N,]
-            labels: np.array -> [N,]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            if self.no_multi_labels:
-                # [M,]
-                scores, labels = torch.max(cls_pred_i.sigmoid(), dim=1)
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # topk candidates
-                predicted_prob, topk_idxs = scores.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                labels = labels[topk_idxs]
-                bboxes = box_pred_i[topk_idxs]
-            else:
-                # [M, C] -> [MC,]
-                scores_i = cls_pred_i.sigmoid().flatten()
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # torch.sort is actually faster than .topk (at least on GPUs)
-                predicted_prob, topk_idxs = scores_i.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-                labels = topk_idxs % self.num_classes
-
-                bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores, dim=0)
-        labels = torch.cat(all_labels, dim=0)
-        bboxes = torch.cat(all_bboxes, dim=0)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes)
-        
-        return bboxes, scores, labels
-    
-    def forward(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Heads ----------------
-        cls_feats, reg_feats = self.head(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.pred(cls_feats, reg_feats)
-        outputs['image_size'] = [x.shape[2], x.shape[3]]
-
-        if not self.training:
-            all_cls_preds = outputs['pred_cls']
-            all_box_preds = outputs['pred_box']
-
-            # post process
-            bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
-            outputs = {
-                "scores": scores,
-                "labels": labels,
-                "bboxes": bboxes
-            }
-        
-        return outputs 

+ 0 - 117
yolo/models/yolov7_af/yolov7_af_backbone.py

@@ -1,117 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .yolov7_af_basic import BasicConv, MDown, ELANLayer
-except:
-    from  yolov7_af_basic import BasicConv, MDown, ELANLayer
-
-
-# ELANNet
-class Yolov7Backbone(nn.Module):
-    def __init__(self, cfg):
-        super(Yolov7Backbone, self).__init__()
-        # ---------------- Basic parameters ----------------
-        self.model_scale = cfg.scale
-        if self.model_scale in ["l", "x"]:
-            self.elan_depth = 2
-            self.feat_dims = [round(64   * cfg.width), round(128  * cfg.width), round(256  * cfg.width),
-                              round(512  * cfg.width), round(1024 * cfg.width), round(1024 * cfg.width)]
-            self.last_stage_eratio = 0.25
-        if self.model_scale in ["n", "s"]:
-            self.elan_depth = 1
-            self.feat_dims = [round(64   * cfg.width), round(64  * cfg.width), round(128  * cfg.width),
-                              round(256  * cfg.width), round(512 * cfg.width), round(1024 * cfg.width)]
-            self.last_stage_eratio = 0.5
-
-        # ---------------- Model parameters ----------------
-        
-        # large backbone
-        self.layer_1 = nn.Sequential(
-            BasicConv(3, self.feat_dims[0]//2, kernel_size=3, padding=1, stride=1,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            BasicConv(self.feat_dims[0]//2, self.feat_dims[0], kernel_size=3, padding=1, stride=2,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            BasicConv(self.feat_dims[0], self.feat_dims[0], kernel_size=3, padding=1, stride=1,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise)
-        )
-        self.layer_2 = nn.Sequential(   
-            BasicConv(self.feat_dims[0], self.feat_dims[1],
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),      
-            ELANLayer(self.feat_dims[1], self.feat_dims[2],
-                      expansion=0.5, num_blocks=self.elan_depth,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),      
-        )
-        self.layer_3 = nn.Sequential(
-            MDown(self.feat_dims[2], self.feat_dims[2],
-                  act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),             
-            ELANLayer(self.feat_dims[2], self.feat_dims[3],
-                      expansion=0.5, num_blocks=self.elan_depth,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),      
-        )
-        self.layer_4 = nn.Sequential(
-            MDown(self.feat_dims[3], self.feat_dims[3],
-                  act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),             
-            ELANLayer(self.feat_dims[3], self.feat_dims[4],
-                      expansion=0.5, num_blocks=self.elan_depth,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),      
-        )
-        self.layer_5 = nn.Sequential(
-            MDown(self.feat_dims[4], self.feat_dims[4],
-                  act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),             
-            ELANLayer(self.feat_dims[4], self.feat_dims[5],
-                      expansion=self.last_stage_eratio, num_blocks=self.elan_depth,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),      
-        )
-
-        # Initialize all layers
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    class BaseConfig(object):
-        def __init__(self) -> None:
-            self.bk_act = 'silu'
-            self.bk_norm = 'BN'
-            self.bk_depthwise = False
-            self.width = 0.5
-            self.depth = 0.34
-            self.scale = "s"
-
-    cfg = BaseConfig()
-    model = Yolov7Backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    x = torch.randn(1, 3, 640, 640)
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

+ 0 - 190
yolo/models/yolov7_af/yolov7_af_basic.py

@@ -1,190 +0,0 @@
-import torch
-import torch.nn as nn
-from typing import List
-
-
-# --------------------- Basic modules ---------------------
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-    elif norm_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-class BasicConv(nn.Module):
-    def __init__(self, 
-                 in_dim,                   # in channels
-                 out_dim,                  # out channels 
-                 kernel_size=1,            # kernel size 
-                 padding=0,                # padding
-                 stride=1,                 # padding
-                 dilation=1,               # dilation
-                 act_type  :str = 'lrelu', # activation
-                 norm_type :str = 'BN',    # normalization
-                 depthwise :bool = False
-                ):
-        super(BasicConv, self).__init__()
-        self.depthwise = depthwise
-        if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
-            self.norm = get_norm(norm_type, out_dim)
-        else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
-            self.norm1 = get_norm(norm_type, in_dim)
-            self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
-            self.norm2 = get_norm(norm_type, out_dim)
-        self.act  = get_activation(act_type)
-
-    def forward(self, x):
-        if not self.depthwise:
-            return self.act(self.norm(self.conv(x)))
-        else:
-            # Depthwise conv
-            x = self.norm1(self.conv1(x))
-            # Pointwise conv
-            x = self.norm2(self.conv2(x))
-            return x
-
-
-# ---------------------------- Basic Modules ----------------------------
-class MDown(nn.Module):
-    def __init__(self,
-                 in_dim    :int,
-                 out_dim   :int,
-                 act_type  :str   = 'silu',
-                 norm_type :str   = 'BN',
-                 depthwise :bool  = False,
-                 ) -> None:
-        super().__init__()
-        inter_dim = out_dim // 2
-        self.downsample_1 = nn.Sequential(
-            nn.MaxPool2d((2, 2), stride=2),
-            BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        )
-        self.downsample_2 = nn.Sequential(
-            BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type),
-            BasicConv(inter_dim, inter_dim,
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        if in_dim == out_dim:
-            self.output_proj = nn.Identity()
-        else:
-            self.output_proj = BasicConv(inter_dim * 2, out_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-
-    def forward(self, x):
-        x1 = self.downsample_1(x)
-        x2 = self.downsample_2(x)
-
-        out = self.output_proj(torch.cat([x1, x2], dim=1))
-
-        return out
-
-class ELANLayer(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expansion  :float = 0.5,
-                 num_blocks :int   = 1,
-                 act_type   :str   = 'silu',
-                 norm_type  :str   = 'BN',
-                 depthwise  :bool  = False,
-                 ) -> None:
-        super(ELANLayer, self).__init__()
-        self.inter_dim = round(in_dim * expansion)
-        self.conv_layer_1 = BasicConv(in_dim, self.inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.conv_layer_2 = BasicConv(in_dim, self.inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.conv_layer_3 = BasicConv(self.inter_dim * 4, out_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.elan_layer_1 = nn.Sequential(*[BasicConv(self.inter_dim, self.inter_dim,
-                                                      kernel_size=3, padding=1,
-                                                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-                                                      for _ in range(num_blocks)])
-        self.elan_layer_2 = nn.Sequential(*[BasicConv(self.inter_dim, self.inter_dim,
-                                                      kernel_size=3, padding=1,
-                                                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-                                                      for _ in range(num_blocks)])
-
-    def forward(self, x):
-        # Input proj
-        x1 = self.conv_layer_1(x)
-        x2 = self.conv_layer_2(x)
-        x3 = self.elan_layer_1(x2)
-        x4 = self.elan_layer_2(x3)
-    
-        out = self.conv_layer_3(torch.cat([x1, x2, x3, x4], dim=1))
-
-        return out
-
-## PaFPN's ELAN-Block proposed by YOLOv7
-class ELANLayerFPN(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expansions   :List = [0.5, 0.5],
-                 branch_width :int  = 4,
-                 branch_depth :int  = 1,
-                 act_type     :str  = 'silu',
-                 norm_type    :str  = 'BN',
-                 depthwise=False):
-        super(ELANLayerFPN, self).__init__()
-        # Basic parameters
-        inter_dim  = round(in_dim * expansions[0])
-        inter_dim2 = round(inter_dim * expansions[1]) 
-        # Network structure
-        self.cv1 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.cv2 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.cv3 = nn.ModuleList()
-        for idx in range(round(branch_width)):
-            if idx == 0:
-                cvs = [BasicConv(inter_dim, inter_dim2,
-                                 kernel_size=3, padding=1,
-                                 act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
-            else:
-                cvs = [BasicConv(inter_dim2, inter_dim2,
-                                 kernel_size=3, padding=1,
-                                 act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
-            # deeper
-            if round(branch_depth) > 1:
-                for _ in range(1, round(branch_depth)):
-                    cvs.append(BasicConv(inter_dim2, inter_dim2, kernel_size=3, padding=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-                self.cv3.append(nn.Sequential(*cvs))
-            else:
-                self.cv3.append(cvs[0])
-
-        self.output_proj = BasicConv(inter_dim*2+inter_dim2*len(self.cv3), out_dim,
-                                     kernel_size=1, act_type=act_type, norm_type=norm_type)
-
-
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        inter_outs = [x1, x2]
-        for m in self.cv3:
-            y1 = inter_outs[-1]
-            y2 = m(y1)
-            inter_outs.append(y2)
-        out = self.output_proj(torch.cat(inter_outs, dim=1))
-
-        return out

+ 0 - 126
yolo/models/yolov7_af/yolov7_af_head.py

@@ -1,126 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .yolov7_af_basic import BasicConv
-
-
-# -------------------- Detection Head --------------------
-## Single-level Detection Head
-class DetHead(nn.Module):
-    def __init__(self,
-                 in_dim       :int  = 256,
-                 cls_head_dim :int  = 256,
-                 reg_head_dim :int  = 256,
-                 num_cls_head :int  = 2,
-                 num_reg_head :int  = 2,
-                 act_type     :str  = "silu",
-                 norm_type    :str  = "BN",
-                 depthwise    :bool = False):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.depthwise = depthwise
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = cls_head_dim
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    BasicConv(in_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-            else:
-                cls_feats.append(
-                    BasicConv(self.cls_head_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = reg_head_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    BasicConv(in_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-            else:
-                reg_feats.append(
-                    BasicConv(self.reg_head_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # Reset the Conv2d parameters to the PyTorch defaults,
-                # consistent with the original implementation
-                m.reset_parameters()
-
-    def forward(self, x):
-        """
-            x: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-## Multi-level Detection Head
-class Yolov7DetHead(nn.Module):
-    def __init__(self, cfg, in_dims):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [DetHead(in_dim       = in_dims[level],
-                     cls_head_dim = max(in_dims[0], min(cfg.num_classes, 128)),
-                     reg_head_dim = max(in_dims[0]//4, 16, 4*cfg.reg_max),
-                     num_cls_head = cfg.num_cls_head,
-                     num_reg_head = cfg.num_reg_head,
-                     act_type     = cfg.head_act,
-                     norm_type    = cfg.head_norm,
-                     depthwise    = cfg.head_depthwise)
-                     for level in range(cfg.num_levels)
-                     ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
-        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
-
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        return cls_feats, reg_feats
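
As a rough, standalone illustration of what this decoupled head computes, the sketch below runs two independent conv stacks over one feature level; the widths are assumed and plain Conv2d + SiLU stands in for the repo's BasicConv:

import torch
import torch.nn as nn

in_dim, cls_dim, reg_dim = 256, 128, 64        # assumed example widths

def block(c_in, c_out):
    return nn.Sequential(nn.Conv2d(c_in, c_out, 3, padding=1), nn.SiLU())

cls_branch = nn.Sequential(block(in_dim, cls_dim), block(cls_dim, cls_dim))
reg_branch = nn.Sequential(block(in_dim, reg_dim), block(reg_dim, reg_dim))

x = torch.randn(2, in_dim, 20, 20)             # one FPN level
print(cls_branch(x).shape, reg_branch(x).shape)
# torch.Size([2, 128, 20, 20]) torch.Size([2, 64, 20, 20])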

+ 0 - 60
yolo/models/yolov7_af/yolov7_af_neck.py

@@ -1,60 +0,0 @@
-import torch
-import torch.nn as nn
-from .yolov7_af_basic import BasicConv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code is adapted from https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim, expansion=0.5):
-        super().__init__()
-        ## ----------- Basic Parameters -----------
-        inter_dim = round(in_dim * expansion)
-        self.out_dim = out_dim
-        ## ----------- Network Parameters -----------
-        self.cv1 = BasicConv(in_dim, inter_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.cv2 = BasicConv(inter_dim * 4, out_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.m = nn.MaxPool2d(kernel_size=cfg.spp_pooling_size,
-                              stride=1,
-                              padding=cfg.spp_pooling_size // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-# SPPF block with CSP module
-class SPPFBlockCSP(nn.Module):
-    """
-        CSP Spatial Pyramid Pooling Block
-    """
-    def __init__(self, cfg, in_dim, out_dim):
-        super(SPPFBlockCSP, self).__init__()
-        inter_dim = int(in_dim * cfg.neck_expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.cv2 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.module = nn.Sequential(
-            BasicConv(inter_dim, inter_dim, kernel_size=3, padding=1, 
-                      act_type=cfg.neck_act, norm_type=cfg.neck_norm, depthwise=cfg.neck_depthwise),
-            SPPF(cfg, inter_dim, inter_dim, expansion=1.0),
-            BasicConv(inter_dim, inter_dim, kernel_size=3, padding=1, 
-                      act_type=cfg.neck_act, norm_type=cfg.neck_norm, depthwise=cfg.neck_depthwise),
-                      )
-        self.cv3 = BasicConv(inter_dim * 2, self.out_dim, kernel_size=1, act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-
-        
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.module(self.cv2(x))
-        y = self.cv3(torch.cat([x1, x2], dim=1))
-
-        return y
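
The point of SPPF is that chaining a single k=5 max-pool reproduces the 9x9 and 13x13 pools of the original SPP block, so the concatenation above covers receptive fields of 1/5/9/13 with one pooling layer. A small standalone check (tensor size assumed for the demo):

import torch
import torch.nn as nn

x  = torch.randn(1, 8, 32, 32)
m5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)

y1 = m5(x)        # 5x5 receptive field
y2 = m5(y1)       # equivalent to one 9x9 pool
y3 = m5(y2)       # equivalent to one 13x13 pool

print(torch.equal(y2, nn.MaxPool2d(9, 1, padding=4)(x)),
      torch.equal(y3, nn.MaxPool2d(13, 1, padding=6)(x)))   # True True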

+ 0 - 114
yolo/models/yolov7_af/yolov7_af_pafpn.py

@@ -1,114 +0,0 @@
-from typing import List
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .yolov7_af_basic import BasicConv, ELANLayerFPN, MDown
-
-
-# PaFPN-ELAN (YOLOv7's)
-class Yolov7PaFPN(nn.Module):
-    def __init__(self, cfg, in_dims: List = [512, 1024, 512]):
-        super(Yolov7PaFPN, self).__init__()
-        # ----------------------------- Basic parameters -----------------------------
-        self.in_dims = in_dims
-        self.out_dims = [round(256*cfg.width), round(512*cfg.width), round(1024*cfg.width)]
-        c3, c4, c5 = in_dims
-
-        # ----------------------------- Yolov7's Top-down FPN -----------------------------
-        ## P5 -> P4
-        self.reduce_layer_1 = BasicConv(c5, round(256*cfg.width),
-                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.reduce_layer_2 = BasicConv(c4, round(256*cfg.width),
-                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.top_down_layer_1 = ELANLayerFPN(in_dim     = round(256*cfg.width) + round(256*cfg.width),
-                                             out_dim    = round(256*cfg.width),
-                                             expansions   = cfg.fpn_expansions,
-                                             branch_width = cfg.fpn_block_bw,
-                                             branch_depth = cfg.fpn_block_dw,
-                                             act_type   = cfg.fpn_act,
-                                             norm_type  = cfg.fpn_norm,
-                                             depthwise  = cfg.fpn_depthwise,
-                                             )
-        ## P4 -> P3
-        self.reduce_layer_3 = BasicConv(round(256*cfg.width), round(128*cfg.width),
-                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.reduce_layer_4 = BasicConv(c3, round(128*cfg.width),
-                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.top_down_layer_2 = ELANLayerFPN(in_dim     = round(128*cfg.width) + round(128*cfg.width),
-                                             out_dim    = round(128*cfg.width),
-                                             expansions   = cfg.fpn_expansions,
-                                             branch_width = cfg.fpn_block_bw,
-                                             branch_depth = cfg.fpn_block_dw,
-                                             act_type   = cfg.fpn_act,
-                                             norm_type  = cfg.fpn_norm,
-                                             depthwise  = cfg.fpn_depthwise,
-                                             )
-        # ----------------------------- Yolov7's Bottom-up PAN -----------------------------
-        ## P3 -> P4
-        self.downsample_layer_1 = MDown(round(128*cfg.width), round(256*cfg.width),
-                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.bottom_up_layer_1 = ELANLayerFPN(in_dim     = round(256*cfg.width) + round(256*cfg.width),
-                                              out_dim    = round(256*cfg.width),
-                                              expansions   = cfg.fpn_expansions,
-                                              branch_width = cfg.fpn_block_bw,
-                                              branch_depth = cfg.fpn_block_dw,
-                                              act_type     = cfg.fpn_act,
-                                              norm_type    = cfg.fpn_norm,
-                                              depthwise    = cfg.fpn_depthwise,
-                                              )
-        ## P4 -> P5
-        self.downsample_layer_2 = MDown(round(256*cfg.width), round(512*cfg.width),
-                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.bottom_up_layer_2 = ELANLayerFPN(in_dim     = round(512*cfg.width) + c5,
-                                              out_dim    = round(512*cfg.width),
-                                              expansions   = cfg.fpn_expansions,
-                                              branch_width = cfg.fpn_block_bw,
-                                              branch_depth = cfg.fpn_block_dw,
-                                              act_type   = cfg.fpn_act,
-                                              norm_type  = cfg.fpn_norm,
-                                              depthwise  = cfg.fpn_depthwise,
-                                              )
-
-        # ----------------------------- Head conv layers -----------------------------
-        ## Head convs
-        self.head_conv_1 = BasicConv(round(128*cfg.width), round(256*cfg.width),
-                                     kernel_size=3, padding=1, stride=1,
-                                     act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-        self.head_conv_2 = BasicConv(round(256*cfg.width), round(512*cfg.width),
-                                     kernel_size=3, padding=1, stride=1,
-                                     act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-        self.head_conv_3 = BasicConv(round(512*cfg.width), round(1024*cfg.width),
-                                     kernel_size=3, padding=1, stride=1,
-                                     act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-
-    def forward(self, features):
-        c3, c4, c5 = features
-
-        # ------------------ Top down FPN ------------------
-        ## P5 -> P4
-        p5 = self.reduce_layer_1(c5)
-        p5_up = F.interpolate(p5, scale_factor=2.0)
-        p4 = self.reduce_layer_2(c4)
-        p4 = self.top_down_layer_1(torch.cat([p5_up, p4], dim=1))
-
-        ## P4 -> P3
-        p4_in = self.reduce_layer_3(p4)
-        p4_up = F.interpolate(p4_in, scale_factor=2.0)
-        p3 = self.reduce_layer_4(c3)
-        p3 = self.top_down_layer_2(torch.cat([p4_up, p3], dim=1))
-
-        # ------------------ Bottom up PAN ------------------
-        ## P3 -> P4
-        p3_ds = self.downsample_layer_1(p3)
-        p4 = torch.cat([p3_ds, p4], dim=1)
-        p4 = self.bottom_up_layer_1(p4)
-
-        ## P4 -> P5
-        p4_ds = self.downsample_layer_2(p4)
-        p5 = torch.cat([p4_ds, c5], dim=1)
-        p5 = self.bottom_up_layer_2(p5)
-
-        out_feats = [self.head_conv_1(p3), self.head_conv_2(p4), self.head_conv_3(p5)]
-            
-        return out_feats
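
At the shape level, the top-down/bottom-up flow of this PaFPN can be sketched without any of the repo's blocks; in the assumed sketch below, plain interpolation and a stride-2 max-pool stand in for ELANLayerFPN and MDown, and the channel-mixing convs are omitted:

import torch
import torch.nn.functional as F

c3 = torch.randn(1, 512,  80, 80)    # stride 8
c4 = torch.randn(1, 1024, 40, 40)    # stride 16
c5 = torch.randn(1, 512,  20, 20)    # stride 32

p5_up = F.interpolate(c5, scale_factor=2.0)    # 20x20 -> 40x40
p4    = torch.cat([p5_up, c4], dim=1)          # top-down fusion at stride 16
p4_up = F.interpolate(p4, scale_factor=2.0)    # 40x40 -> 80x80
p3    = torch.cat([p4_up, c3], dim=1)          # top-down fusion at stride 8

p3_ds = F.max_pool2d(p3, kernel_size=2)        # 80x80 -> 40x40
p4    = torch.cat([p3_ds, p4], dim=1)          # bottom-up fusion at stride 16
p4_ds = F.max_pool2d(p4, kernel_size=2)        # 40x40 -> 20x20
p5    = torch.cat([p4_ds, c5], dim=1)          # bottom-up fusion at stride 32

print(p3.shape[-2:], p4.shape[-2:], p5.shape[-2:])
# torch.Size([80, 80]) torch.Size([40, 40]) torch.Size([20, 20])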

+ 0 - 153
yolo/models/yolov7_af/yolov7_af_pred.py

@@ -1,153 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-# -------------------- Detection Pred Layer --------------------
-## Single-level pred layer
-class AFDetPredLayer(nn.Module):
-    def __init__(self,
-                 cls_dim     :int = 256,
-                 reg_dim     :int = 256,
-                 stride      :int = 32,
-                 reg_max     :int = 16,
-                 num_classes :int = 80,
-                 num_coords  :int = 4):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.stride = stride
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.reg_max = reg_max
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-
-        # --------- Network Parameters ----------
-        self.cls_pred = nn.Conv2d(cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(reg_dim, num_coords, kernel_size=1)                
-
-        self.init_bias()
-        
-    def init_bias(self):
-        # cls pred bias
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(math.log(5 / self.num_classes / (640. / self.stride) ** 2))
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred bias
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-    def generate_anchors(self, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        # generate grid cells
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors += 0.5  # add center offset
-        anchors *= self.stride
-
-        return anchors
-        
-    def forward(self, cls_feat, reg_feat):
-        # pred
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        # generate anchor boxes: [M, 4]
-        B, _, H, W = cls_pred.size()
-        fmp_size = [H, W]
-        anchors = self.generate_anchors(fmp_size)
-        anchors = anchors.to(cls_pred.device)
-        # stride tensor: [M, 1]
-        stride_tensor = torch.ones_like(anchors[..., :1]) * self.stride
-        
-        # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
-        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4*self.reg_max)
-        
-        # output dict
-        outputs = {"pred_cls": cls_pred,            # (Tensor) [B, M, C]
-                   "pred_reg": reg_pred,            # (Tensor) [B, M, 4*reg_max]
-                   "anchors": anchors,              # (Tensor) [M, 2]
-                   "strides": self.stride,          # (Int) 8, 16 or 32
-                   "stride_tensor": stride_tensor   # (Tensor) [M, 1]
-                   }
-
-        return outputs
-
-## Multi-level pred layer
-class Yolov7AFDetPredLayer(nn.Module):
-    def __init__(self,
-                 cfg,
-                 cls_dim,
-                 reg_dim,
-                 ):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cfg = cfg
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-
-        # ----------- Network Parameters -----------
-        ## pred layers
-        self.multi_level_preds = nn.ModuleList(
-            [AFDetPredLayer(cls_dim     = cls_dim,
-                            reg_dim     = reg_dim,
-                            stride      = cfg.out_stride[level],
-                            reg_max     = cfg.reg_max,
-                            num_classes = cfg.num_classes,
-                            num_coords  = 4 * cfg.reg_max)
-                            for level in range(cfg.num_levels)
-                            ])
-        ## proj conv
-        proj_init = torch.arange(cfg.reg_max, dtype=torch.float)
-        self.proj_conv = nn.Conv2d(cfg.reg_max, 1, kernel_size=1, bias=False).requires_grad_(False)
-        self.proj_conv.weight.data[:] = nn.Parameter(proj_init.view([1, cfg.reg_max, 1, 1]), requires_grad=False)
-
-    def forward(self, cls_feats, reg_feats):
-        all_anchors = []
-        all_strides = []
-        all_cls_preds = []
-        all_reg_preds = []
-        all_box_preds = []
-        for level in range(self.cfg.num_levels):
-            # -------------- Single-level prediction --------------
-            outputs = self.multi_level_preds[level](cls_feats[level], reg_feats[level])
-
-            # -------------- Decode bbox --------------
-            B, M = outputs["pred_reg"].shape[:2]
-            # [B, M, 4*(reg_max)] -> [B, M, 4, reg_max]
-            delta_pred = outputs["pred_reg"].reshape([B, M, 4, self.cfg.reg_max])
-            # [B, M, 4, reg_max] -> [B, reg_max, 4, M]
-            delta_pred = delta_pred.permute(0, 3, 2, 1).contiguous()
-            # [B, reg_max, 4, M] -> [B, 1, 4, M]
-            delta_pred = self.proj_conv(F.softmax(delta_pred, dim=1))
-            # [B, 1, 4, M] -> [B, 4, M] -> [B, M, 4]
-            delta_pred = delta_pred.view(B, 4, M).permute(0, 2, 1).contiguous()
-            ## tlbr -> xyxy
-            x1y1_pred = outputs["anchors"][None] - delta_pred[..., :2] * self.cfg.out_stride[level]
-            x2y2_pred = outputs["anchors"][None] + delta_pred[..., 2:] * self.cfg.out_stride[level]
-            box_pred = torch.cat([x1y1_pred, x2y2_pred], dim=-1)
-
-            # collect results
-            all_cls_preds.append(outputs["pred_cls"])
-            all_reg_preds.append(outputs["pred_reg"])
-            all_box_preds.append(box_pred)
-            all_anchors.append(outputs["anchors"])
-            all_strides.append(outputs["stride_tensor"])
-        
-        # output dict
-        outputs = {"pred_cls":      all_cls_preds,         # List(Tensor) [B, M, C]
-                   "pred_reg":      all_reg_preds,         # List(Tensor) [B, M, 4*(reg_max)]
-                   "pred_box":      all_box_preds,         # List(Tensor) [B, M, 4]
-                   "anchors":       all_anchors,           # List(Tensor) [M, 2]
-                   "stride_tensor": all_strides,           # List(Tensor) [M, 1]
-                   "strides":       self.cfg.out_stride,   # List(Int) = [8, 16, 32]
-                   }
-
-        return outputs
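
The decode step above is a DFL-style regression: softmax over reg_max bins per box side, expectation taken by the frozen proj_conv, then tlbr offsets turned into xyxy boxes around the anchor centers. A standalone sketch of the same computation (B, M, reg_max, stride and the anchors below are assumed dummy values):

import torch
import torch.nn.functional as F

B, M, reg_max, stride = 2, 100, 16, 32
reg_pred = torch.randn(B, M, 4 * reg_max)      # raw distribution logits
anchors  = torch.rand(M, 2) * 640              # dummy anchor centers in pixels

prob  = F.softmax(reg_pred.view(B, M, 4, reg_max), dim=-1)
bins  = torch.arange(reg_max, dtype=torch.float)
delta = (prob * bins).sum(-1)                  # [B, M, 4] expected l, t, r, b in stride units

x1y1  = anchors[None] - delta[..., :2] * stride
x2y2  = anchors[None] + delta[..., 2:] * stride
boxes = torch.cat([x1y1, x2y2], dim=-1)        # [B, M, 4] xyxy boxes
print(boxes.shape)                             # torch.Size([2, 100, 4])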