yjh0410 2 years ago
parent
commit
578dc295f4

+ 4 - 50
config/__init__.py

@@ -32,20 +32,6 @@ from .data_config.transform_config import (
     yolox_huge_trans_config,
     # SSD-Style
     ssd_trans_config,
-    # RTMDet-v1-Style
-    rtcdet_v1_pico_trans_config,
-    rtcdet_v1_nano_trans_config,
-    rtcdet_v1_small_trans_config,
-    rtcdet_v1_medium_trans_config,
-    rtcdet_v1_large_trans_config,
-    rtcdet_v1_huge_trans_config,
-    # RTMDet-v2-Style
-    rtcdet_v2_pico_trans_config,
-    rtcdet_v2_nano_trans_config,
-    rtcdet_v2_small_trans_config,
-    rtcdet_v2_medium_trans_config,
-    rtcdet_v2_large_trans_config,
-    rtcdet_v2_huge_trans_config,
 )
 
 def build_trans_config(trans_config='ssd'):
@@ -84,34 +70,6 @@ def build_trans_config(trans_config='ssd'):
     elif trans_config == 'yolox_huge':
         cfg = yolox_huge_trans_config
 
-    # RTMDetv1-style transform 
-    elif trans_config == 'rtcdet_v1_pico':
-        cfg = rtcdet_v1_pico_trans_config
-    elif trans_config == 'rtcdet_v1_nano':
-        cfg = rtcdet_v1_nano_trans_config
-    elif trans_config == 'rtcdet_v1_small':
-        cfg = rtcdet_v1_small_trans_config
-    elif trans_config == 'rtcdet_v1_medium':
-        cfg = rtcdet_v1_medium_trans_config
-    elif trans_config == 'rtcdet_v1_large':
-        cfg = rtcdet_v1_large_trans_config
-    elif trans_config == 'rtcdet_v1_huge':
-        cfg = rtcdet_v1_huge_trans_config
-
-    # RTMDetv2-style transform 
-    elif trans_config == 'rtcdet_v2_pico':
-        cfg = rtcdet_v2_pico_trans_config
-    elif trans_config == 'rtcdet_v2_nano':
-        cfg = rtcdet_v2_nano_trans_config
-    elif trans_config == 'rtcdet_v2_small':
-        cfg = rtcdet_v2_small_trans_config
-    elif trans_config == 'rtcdet_v2_medium':
-        cfg = rtcdet_v2_medium_trans_config
-    elif trans_config == 'rtcdet_v2_large':
-        cfg = rtcdet_v2_large_trans_config
-    elif trans_config == 'rtcdet_v2_huge':
-        cfg = rtcdet_v2_huge_trans_config
-
     print('Transform Config: {} \n'.format(cfg))
 
     return cfg
@@ -127,8 +85,7 @@ from .model_config.yolov5_config import yolov5_cfg
 from .model_config.yolov7_config import yolov7_cfg
 from .model_config.yolox_config import yolox_cfg
 ## My RTMDet series
-from .model_config.rtcdet_v1_config import rtcdet_v1_cfg
-from .model_config.rtcdet_v2_config import rtcdet_v2_cfg
+from .model_config.rtcdet_config import rtcdet_cfg
 
 
 def build_model_config(args):
@@ -155,12 +112,9 @@ def build_model_config(args):
     # YOLOX
     elif args.model in ['yolox_n', 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']:
         cfg = yolox_cfg[args.model]
-    # My RTMDet-v1
-    elif args.model in ['rtcdet_v1_p', 'rtcdet_v1_n', 'rtcdet_v1_t', 'rtcdet_v1_s', 'rtcdet_v1_m', 'rtcdet_v1_l', 'rtcdet_v1_x']:
-        cfg = rtcdet_v1_cfg[args.model]
-    # My RTMDet-v2
-    elif args.model in ['rtcdet_v2_p', 'rtcdet_v2_n', 'rtcdet_v2_t', 'rtcdet_v2_s', 'rtcdet_v2_m', 'rtcdet_v2_l', 'rtcdet_v2_x']:
-        cfg = rtcdet_v2_cfg[args.model]
+    # RTCDet
+    elif args.model in ['rtcdet_p', 'rtcdet_n', 'rtcdet_t', 'rtcdet_s', 'rtcdet_m', 'rtcdet_l', 'rtcdet_x']:
+        cfg = rtcdet_cfg[args.model]
 
     return cfg
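To make the consolidated naming concrete, a minimal usage sketch of the new `rtcdet_*` keys follows, assuming the repo's usual argparse-style `args` object; the `SimpleNamespace` stand-in and the chosen model key are illustrative only, not part of this commit:

```python
from types import SimpleNamespace

from config import build_model_config, build_trans_config

# Hypothetical args object; build_model_config only reads args.model here.
args = SimpleNamespace(model='rtcdet_s')

model_cfg = build_model_config(args)  # picks rtcdet_cfg['rtcdet_s']

# 'trans_type' follows the repo's convention; after this commit it must name one of
# the remaining transform configs (e.g. a yolox_* one), since the rtcdet_v1/v2
# transform entries were removed.
trans_cfg = build_trans_config(model_cfg['trans_type'])
```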
 

+ 0 - 225
config/data_config/transform_config.py

@@ -243,228 +243,3 @@ ssd_trans_config = {
     'mixup_type': 'yolov5_mixup',
     'mixup_scale': [0.5, 1.5]
 }
-
-
-# ----------------------- RTMDet-v1's Transform -----------------------
-rtcdet_v1_huge_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.1, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 1.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolox_mixup',
-    'mixup_scale': [0.5, 1.5]   # "mixup_scale" is not used for YOLOv5MixUp
-}
-
-rtcdet_v1_large_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.1, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 1.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolox_mixup',
-    'mixup_scale': [0.5, 1.5]   # "mixup_scale" is not used for YOLOv5MixUp
-}
-
-rtcdet_v1_medium_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.1, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 1.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolox_mixup',
-    'mixup_scale': [0.5, 1.5]   # "mixup_scale" is not used for YOLOv5MixUp
-}
-
-rtcdet_v1_small_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.1, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 1.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolox_mixup',
-    'mixup_scale': [0.5, 1.5]   # "mixup_scale" is not used for YOLOv5MixUp
-}
-
-rtcdet_v1_nano_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.1,
-    'scale': [0.5, 1.5],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 0.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolox_mixup',
-    'mixup_scale': [0.5, 1.5]   # "mixup_scale" is not used for YOLOv5MixUp
-}
-
-rtcdet_v1_pico_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.1,
-    'scale': [0.5, 1.5],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 0.5,
-    'mixup_prob': 0.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolox_mixup',
-    'mixup_scale': [0.5, 1.5]   # "mixup_scale" is not used for YOLOv5MixUp
-}
-
-# ----------------------- RTMDet-v2's Transform -----------------------
-rtcdet_v2_huge_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.5, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 1.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolov5_mixup'
-}
-
-rtcdet_v2_large_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.5, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 1.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolov5_mixup'
-}
-
-rtcdet_v2_medium_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.5, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 1.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolov5_mixup'
-}
-
-rtcdet_v2_small_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.5, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 1.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolov5_mixup'
-}
-
-rtcdet_v2_nano_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.5, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 1.0,
-    'mixup_prob': 0.1,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolov5_mixup'
-}
-
-rtcdet_v2_pico_trans_config = {
-    'aug_type': 'yolov5',
-    # Basic Augment
-    'degrees': 0.0,
-    'translate': 0.2,
-    'scale': [0.5, 2.0],
-    'shear': 0.0,
-    'perspective': 0.0,
-    'hsv_h': 0.015,
-    'hsv_s': 0.7,
-    'hsv_v': 0.4,
-    # Mosaic & Mixup
-    'mosaic_prob': 0.5,
-    'mixup_prob': 0.0,
-    'mosaic_type': 'yolov5_mosaic',
-    'mixup_type': 'yolov5_mixup'
-}

+ 5 - 5
config/model_config/rtcdet_v2_config.py → config/model_config/rtcdet_config.py

@@ -1,8 +1,8 @@
 # RTCDet-v2 Config
 
 
-rtcdet_v2_cfg = {
-    'rtcdet_v2_s':{
+rtcdet_cfg = {
+    'rtcdet_s':{
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'elannet_v2',
@@ -15,7 +15,7 @@ rtcdet_v2_cfg = {
         'stride': [8, 16, 32],  # P3, P4, P5
         'max_stride': 32,
         ## Neck: SPP
-        'neck': 'sppf',
+        'neck': 'csp_sppf',
         'neck_expand_ratio': 0.5,
         'pooling_size': 5,
         'neck_act': 'silu',
@@ -58,7 +58,7 @@ rtcdet_v2_cfg = {
         'trainer_type': 'rtmdet',
     },
 
-    'rtcdet_v2_l':{
+    'rtcdet_l':{
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'elannet_v2',
@@ -71,7 +71,7 @@ rtcdet_v2_cfg = {
         'stride': [8, 16, 32],  # P3, P4, P5
         'max_stride': 32,
         ## Neck: SPP
-        'neck': 'sppf',
+        'neck': 'csp_sppf',
         'neck_expand_ratio': 0.5,
         'pooling_size': 5,
         'neck_act': 'silu',

+ 0 - 383
config/model_config/rtcdet_v1_config.py

@@ -1,383 +0,0 @@
-# RTCDet-v1 Config
-
-
-rtcdet_v1_cfg = {
-    'rtcdet_v1_p':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': True,
-        'width': 0.25,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': True,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': True,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': True,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'rtcdet_v1_pico',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        'loss_dfl_weight': 1.0,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtmdet',
-    },
-
-    'rtcdet_v1_n':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'rtcdet_v1_nano',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        'loss_dfl_weight': 1.0,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtmdet',
-    },
-
-    'rtcdet_v1_t':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.375,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'rtcdet_v1_nano',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        'loss_dfl_weight': 1.0,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtmdet',
-    },
-
-    'rtcdet_v1_s':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.50,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'rtcdet_v1_small',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        'loss_dfl_weight': 1.0,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtmdet',
-    },
-
-    'rtcdet_v1_m':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': False,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.75,
-        'depth': 0.67,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'rtcdet_v1_medium',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        'loss_dfl_weight': 1.0,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtmdet',
-    },
-
-    'rtcdet_v1_l':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': False,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.25],   # 320 -> 800
-        'trans_type': 'rtcdet_v1_large',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        'loss_dfl_weight': 1.0,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtmdet',
-    },
-
-    'rtcdet_v1_x':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': False,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.25,
-        'depth': 1.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.25],   # 320 -> 800
-        'trans_type': 'rtcdet_v1_huge',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        'loss_dfl_weight': 1.0,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtmdet',
-    },
-
-}

+ 4 - 9
models/detectors/__init__.py

@@ -10,8 +10,7 @@ from .yolov4.build import build_yolov4
 from .yolov5.build import build_yolov5
 from .yolov7.build import build_yolov7
 # My RTCDet
-from .rtcdet_v1.build import build_rtcdet_v1
-from .rtcdet_v2.build import build_rtcdet_v2
+from .rtcdet.build import build_rtcdet
 from .yolox.build import build_yolox
 
 
@@ -50,13 +49,9 @@ def build_model(args,
     elif args.model in ['yolox_n', 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']:
         model, criterion = build_yolox(
             args, model_cfg, device, num_classes, trainable, deploy)
-    # My RTCDet-v1
-    elif args.model in ['rtcdet_v1_p', 'rtcdet_v1_n', 'rtcdet_v1_t', 'rtcdet_v1_s', 'rtcdet_v1_m', 'rtcdet_v1_l', 'rtcdet_v1_x']:
-        model, criterion = build_rtcdet_v1(
-            args, model_cfg, device, num_classes, trainable, deploy)
-    # My RTCDet-v2
-    elif args.model in ['rtcdet_v2_p', 'rtcdet_v2_n', 'rtcdet_v2_t', 'rtcdet_v2_s', 'rtcdet_v2_m', 'rtcdet_v2_l', 'rtcdet_v2_x']:
-        model, criterion = build_rtcdet_v2(
+    # RTCDet
+    elif args.model in ['rtcdet_p', 'rtcdet_n', 'rtcdet_t', 'rtcdet_s', 'rtcdet_m', 'rtcdet_l', 'rtcdet_x']:
+        model, criterion = build_rtcdet(
             args, model_cfg, device, num_classes, trainable, deploy)
 
     if trainable:

+ 23 - 0
models/detectors/rtcdet/README.md

@@ -0,0 +1,23 @@
+# RTCDet-v2: My Second Empirical Study of Real-Time Convolutional Object Detectors.
+
+|   Model  | Scale | Batch | AP<sup>test<br>0.5:0.95 | AP<sup>test<br>0.5 | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
+|----------|-------|-------|-------------------------|--------------------|------------------------|-------------------|-------------------|--------------------|--------|
+| RTCDet-N |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+| RTCDet-T |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+| RTCDet-S |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+| RTCDet-M |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+| RTCDet-L |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+| RTCDet-X |  640  |       |                         |                    |                        |                   |                   |                    |  |
+
+|   Model  | Scale | Batch | AP<sup>test<br>0.5:0.95 | AP<sup>test<br>0.5 | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
+|----------|-------|-------|-------------------------|--------------------|------------------------|-------------------|-------------------|--------------------|--------|
+| RTCDet-P |  320  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+| RTCDet-P |  416  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+| RTCDet-P |  512  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+| RTCDet-P |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
+
+- For training, we train my RTCDet series for 300 epochs on COCO.
+- For data augmentation, we use large-scale jitter (LSJ), Mosaic, and Mixup, following the settings of [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX), but we remove the rotation transformation used in YOLOX's strong augmentation.
+- For the optimizer, we use AdamW with weight decay 0.05 and a base per-image lr of 0.001 / 64, as sketched below.
+- For the learning rate scheduler, we use a linear decay schedule.
+- Due to my limited computing resources, I cannot train `RTCDet-X` with the `batch size=128` setting.
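The optimizer and schedule bullets above amount to the following recipe; this is a minimal PyTorch sketch under the stated hyper-parameters (the helper name, the linear scaling of the lr by batch size, the linear-to-zero decay, and per-iteration stepping are assumptions, not code from this commit):

```python
import torch

def build_optimizer_and_scheduler(model, batch_size, total_iters):
    # AdamW with weight decay 0.05; base per-image lr of 0.001 / 64,
    # assumed to be scaled linearly by the total batch size.
    base_lr = 0.001 / 64 * batch_size
    optimizer = torch.optim.AdamW(model.parameters(), lr=base_lr, weight_decay=0.05)

    # Linear decay of the lr factor from 1.0 towards 0.0 over the whole run,
    # stepped once per training iteration.
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=lambda it: max(0.0, 1.0 - it / total_iters))
    return optimizer, scheduler

# Example: 300 epochs, stepping the scheduler after every iteration.
# optimizer, scheduler = build_optimizer_and_scheduler(model, batch_size=128,
#                                                      total_iters=300 * iters_per_epoch)
```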

+ 2 - 2
models/detectors/rtcdet_v2/build.py → models/detectors/rtcdet/build.py

@@ -5,11 +5,11 @@ import torch
 import torch.nn as nn
 
 from .loss import build_criterion
-from .rtcdet_v2 import RTCDet
+from .rtcdet import RTCDet
 
 
 # build object detector
-def build_rtcdet_v2(args, cfg, device, num_classes=80, trainable=False, deploy=False):
+def build_rtcdet(args, cfg, device, num_classes=80, trainable=False, deploy=False):
     print('==============================')
     print('Build {} ...'.format(args.model.upper()))
         

+ 0 - 0
models/detectors/rtcdet_v2/loss.py → models/detectors/rtcdet/loss.py


+ 0 - 0
models/detectors/rtcdet_v2/matcher.py → models/detectors/rtcdet/matcher.py


+ 6 - 6
models/detectors/rtcdet_v2/rtcdet_v2.py → models/detectors/rtcdet/rtcdet.py

@@ -3,11 +3,11 @@ import torch
 import torch.nn as nn
 
 # --------------- Model components ---------------
-from .rtcdet_v2_backbone import build_backbone
-from .rtcdet_v2_neck import build_neck
-from .rtcdet_v2_pafpn import build_fpn
-from .rtcdet_v2_head import build_det_head
-from .rtcdet_v2_pred import build_pred_layer
+from .rtcdet_backbone import build_backbone
+from .rtcdet_neck import build_neck
+from .rtcdet_pafpn import build_fpn
+from .rtcdet_head import build_det_head
+from .rtcdet_pred import build_pred_layer
 
 # --------------- External components ---------------
 from utils.misc import multiclass_nms
@@ -42,7 +42,7 @@ class RTCDet(nn.Module):
         self.backbone, feats_dim = build_backbone(cfg, trainable&cfg['pretrained'])
 
         ## ----------- Neck: SPP -----------
-        self.neck = build_neck(cfg, feats_dim[-1], feats_dim[-1])
+        self.neck = build_neck(cfg, feats_dim[-1], feats_dim[-1]//2)
         feats_dim[-1] = self.neck.out_dim
         
         ## ----------- Neck: FPN -----------

+ 3 - 3
models/detectors/rtcdet_v2/rtcdet_v2_backbone.py → models/detectors/rtcdet/rtcdet_backbone.py

@@ -1,9 +1,9 @@
 import torch
 import torch.nn as nn
 try:
-    from .rtcdet_v2_basic import Conv, ELANBlock, DSBlock
+    from .rtcdet_basic import Conv, ELANBlock, DSBlock
 except:
-    from rtcdet_v2_basic import Conv, ELANBlock, DSBlock
+    from rtcdet_basic import Conv, ELANBlock, DSBlock
 
 
 model_urls = {
@@ -42,7 +42,7 @@ class ELANNetv2(nn.Module):
         )
         ## P2/4
         self.layer_2 = nn.Sequential(   
-            Conv(self.feat_dims[0], self.feat_dims[1], k=3, p=1, s=2, act_type=self.act_type, norm_type=self.norm_type, depthwise=self.depthwise),
+            DSBlock(self.feat_dims[0], self.feat_dims[1], act_type=self.act_type, norm_type=self.norm_type, depthwise=self.depthwise),
             ELANBlock(self.feat_dims[1], self.feat_dims[2], self.expand_ratio[0], self.branch_depths[0], True, self.act_type, self.norm_type, self.depthwise)
         )
         ## P3/8

+ 11 - 9
models/detectors/rtcdet_v2/rtcdet_v2_basic.py → models/detectors/rtcdet/rtcdet_basic.py

@@ -198,12 +198,12 @@ class ELANBlock(nn.Module):
         self.cv2 = Conv(in_dim, self.inter_dim, k=1, act_type=act_type, norm_type=norm_type)
         ## branch-3
         self.cv3 = nn.Sequential(*[
-            Conv(self.inter_dim, self.inter_dim, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            YoloBottleneck(self.inter_dim, self.inter_dim, [1, 3], 1.0, shortcut, act_type, norm_type, depthwise)
             for _ in range(branch_depth)
         ])
         ## branch-4
         self.cv4 = nn.Sequential(*[
-            Conv(self.inter_dim, self.inter_dim, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            YoloBottleneck(self.inter_dim, self.inter_dim, [1, 3], 1.0, shortcut, act_type, norm_type, depthwise)
             for _ in range(branch_depth)
         ])
         ## output proj
@@ -212,8 +212,8 @@ class ELANBlock(nn.Module):
     def forward(self, x):
         x1 = self.cv1(x)
         x2 = self.cv2(x)
-        x3 = self.cv3(x2) + x2 if self.shortcut else self.cv3(x2)
-        x4 = self.cv4(x3) + x3 if self.shortcut else self.cv4(x3)
+        x3 = self.cv3(x2)
+        x4 = self.cv4(x3)
 
         # [B, C, H, W] -> [B, 2C, H, W]
         out = self.out(torch.cat([x1, x2, x3, x4], dim=1))
@@ -238,24 +238,26 @@ class ELANBlockFPN(nn.Module):
         ## branch-2
         self.cv2 = Conv(in_dim, self.inter_dim1, k=1, act_type=act_type, norm_type=norm_type)
         ## branch-3
+        self.cv3 = []
         for i in range(branch_depth):
             if i == 0:
-                self.cv3 = nn.Sequential(Conv(self.inter_dim1, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
+                self.cv3.append(YoloBottleneck(self.inter_dim1, self.inter_dim2, [1, 3], 1.0, shortcut, act_type, norm_type, depthwise))
             else:
-                self.cv3.append(Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
+                self.cv3.append(YoloBottleneck(self.inter_dim2, self.inter_dim2, [1, 3], 1.0, shortcut, act_type, norm_type, depthwise))
+        self.cv3 = nn.Sequential(*self.cv3)
         ## branch-4
         self.cv4 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            YoloBottleneck(self.inter_dim2, self.inter_dim2, [1, 3], 1.0, shortcut, act_type, norm_type, depthwise)
             for _ in range(branch_depth)
         ])
         ## branch-5
         self.cv5 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            YoloBottleneck(self.inter_dim2, self.inter_dim2, [1, 3], 1.0, shortcut, act_type, norm_type, depthwise)
             for _ in range(branch_depth)
         ])
         ## branch-6
         self.cv6 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            YoloBottleneck(self.inter_dim2, self.inter_dim2, [1, 3], 1.0, shortcut, act_type, norm_type, depthwise)
             for _ in range(branch_depth)
         ])
         ## output proj

+ 2 - 2
models/detectors/rtcdet_v2/rtcdet_v2_head.py → models/detectors/rtcdet/rtcdet_head.py

@@ -2,9 +2,9 @@ import torch
 import torch.nn as nn
 
 try:
-    from .rtcdet_v2_basic import Conv
+    from .rtcdet_basic import Conv
 except:
-    from rtcdet_v2_basic import Conv
+    from rtcdet_basic import Conv
 
 
 # Single-level Head

+ 15 - 7
models/detectors/rtcdet_v2/rtcdet_v2_neck.py → models/detectors/rtcdet/rtcdet_neck.py

@@ -2,9 +2,9 @@ import torch
 import torch.nn as nn
 
 try:
-    from .rtcdet_v2_basic import Conv
+    from .rtcdet_basic import Conv
 except:
-    from rtcdet_v2_basic import Conv
+    from rtcdet_basic import Conv
 
 
 # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
@@ -27,6 +27,7 @@ class SPPF(nn.Module):
 
         return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
 
+
 # SPPF block with CSP module
 class SPPFBlockCSP(nn.Module):
     """
@@ -38,20 +39,27 @@ class SPPFBlockCSP(nn.Module):
         self.out_dim = out_dim
         self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
         self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.spp = SPPF(cfg, inter_dim, inter_dim, expand_ratio=1.0)
+        self.m = nn.Sequential(
+            Conv(inter_dim, inter_dim, k=3, p=1, 
+                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
+                 depthwise=cfg['neck_depthwise']),
+            SPPF(cfg, inter_dim, inter_dim, expand_ratio=1.0),
+            Conv(inter_dim, inter_dim, k=3, p=1, 
+                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
+                 depthwise=cfg['neck_depthwise'])
+        )
         self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
 
         
     def forward(self, x):
         x1 = self.cv1(x)
         x2 = self.cv2(x)
-        x3 = self.spp(x2)
+        x3 = self.m(x2)
         y = self.cv3(torch.cat([x1, x3], dim=1))
 
         return y
 
 
-# build neck
 def build_neck(cfg, in_dim, out_dim):
     model = cfg['neck']
     print('==============================')
@@ -77,8 +85,8 @@ if __name__ == '__main__':
         'neck_norm': 'BN',
         'neck_depthwise': False,
     }
-    in_dim = 2048
-    out_dim = 2048
+    in_dim = 1024
+    out_dim = 512
     # Head-1
     model = build_neck(cfg, in_dim, out_dim)
     feat = torch.randn(1, in_dim, 20, 20)

+ 21 - 20
models/detectors/rtcdet_v2/rtcdet_v2_pafpn.py → models/detectors/rtcdet/rtcdet_pafpn.py

@@ -3,14 +3,14 @@ import torch.nn as nn
 import torch.nn.functional as F
 
 try:
-    from .rtcdet_v2_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
+    from .rtcdet_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
 except:
-    from rtcdet_v2_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
+    from rtcdet_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
 
 
 # RTCDet-Style PaFPN
 class RTCDetPaFPN(nn.Module):
-    def __init__(self, cfg, in_dims=[256, 512, 1024], out_dim=None):
+    def __init__(self, cfg, in_dims=[512, 1024, 512], out_dim=None):
         super(RTCDetPaFPN, self).__init__()
         # --------------------------- Basic Parameters ---------------------------
         self.in_dims = in_dims
@@ -25,57 +25,58 @@ class RTCDetPaFPN(nn.Module):
                 
         # --------------------------- Top-down FPN ---------------------------
         ## P5 -> P4
-        self.reduce_layer_1 = build_reduce_layer(cfg, self.fpn_dims[2], self.fpn_dims[2]//2)
-        self.top_down_layer_1 = build_fpn_block(cfg, self.fpn_dims[1] + self.fpn_dims[2]//2, self.fpn_dims[1])
+        self.reduce_layer_1 = build_reduce_layer(cfg, in_dims[2], round(256*cfg['width']))
+        self.reduce_layer_2 = build_reduce_layer(cfg, in_dims[1], round(256*cfg['width']))
+        self.top_down_layer_1 = build_fpn_block(cfg, round(256*cfg['width']) + round(256*cfg['width']), round(256*cfg['width']))
 
         ## P4 -> P3
-        self.reduce_layer_2 = build_reduce_layer(cfg, self.fpn_dims[1], self.fpn_dims[1]//2)
-        self.top_down_layer_2 = build_fpn_block(cfg, self.fpn_dims[0] + self.fpn_dims[1]//2, self.fpn_dims[0])
+        self.reduce_layer_3 = build_reduce_layer(cfg, round(256*cfg['width']), round(128*cfg['width']))
+        self.reduce_layer_4 = build_reduce_layer(cfg, in_dims[0], round(128*cfg['width']))
+        self.top_down_layer_2 = build_fpn_block(cfg, round(128*cfg['width']) + round(128*cfg['width']), round(128*cfg['width']))
 
         # --------------------------- Bottom-up FPN ---------------------------
         ## P3 -> P4
-        self.downsample_layer_1 = build_downsample_layer(cfg, self.fpn_dims[0], self.fpn_dims[0])
-        self.bottom_up_layer_1 = build_fpn_block(cfg, self.fpn_dims[0] + self.fpn_dims[1]//2, self.fpn_dims[1])
+        self.downsample_layer_1 = build_downsample_layer(cfg, round(128*cfg['width']), round(256*cfg['width']))
+        self.bottom_up_layer_1 = build_fpn_block(cfg, round(256*cfg['width']) + round(256*cfg['width']), round(256*cfg['width']))
 
         ## P4 -> P5
-        self.downsample_layer_2 = build_downsample_layer(cfg, self.fpn_dims[1], self.fpn_dims[1])
-        self.bottom_up_layer_2 = build_fpn_block(cfg, self.fpn_dims[1] + self.fpn_dims[2]//2, self.fpn_dims[2])
+        self.downsample_layer_2 = build_downsample_layer(cfg, round(256*cfg['width']), round(512*cfg['width']))
+        self.bottom_up_layer_2 = build_fpn_block(cfg, round(512*cfg['width']) + in_dims[2], round(512*cfg['width']))
                 
         # --------------------------- Output proj ---------------------------
         if out_dim is not None:
             self.out_layers = nn.ModuleList([
                 Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-                for in_dim in self.fpn_dims])
+                for in_dim in [round(128*cfg['width']), round(256*cfg['width']), round(512*cfg['width'])]])
             self.out_dim = [out_dim] * 3
         else:
             self.out_layers = None
-            self.out_dim = self.fpn_dims
+            self.out_dim = [round(128*cfg['width']), round(256*cfg['width']), round(512*cfg['width'])]
 
 
     def forward(self, fpn_feats):
-        fpn_feats = [layer(feat) for feat, layer in zip(fpn_feats, self.input_projs)]
         c3, c4, c5 = fpn_feats
 
         # Top down
         ## P5 -> P4
         c6 = self.reduce_layer_1(c5)
         c7 = F.interpolate(c6, scale_factor=2.0)
-        c8 = torch.cat([c7, c4], dim=1)
+        c8 = torch.cat([c7, self.reduce_layer_2(c4)], dim=1)
         c9 = self.top_down_layer_1(c8)
         ## P4 -> P3
-        c10 = self.reduce_layer_2(c9)
+        c10 = self.reduce_layer_3(c9)
         c11 = F.interpolate(c10, scale_factor=2.0)
-        c12 = torch.cat([c11, c3], dim=1)
+        c12 = torch.cat([c11, self.reduce_layer_4(c3)], dim=1)
         c13 = self.top_down_layer_2(c12)
 
         # Bottom up
         ## p3 -> P4
         c14 = self.downsample_layer_1(c13)
-        c15 = torch.cat([c14, c10], dim=1)
+        c15 = torch.cat([c14, c9], dim=1)
         c16 = self.bottom_up_layer_1(c15)
         ## P4 -> P5
         c17 = self.downsample_layer_2(c16)
-        c18 = torch.cat([c17, c6], dim=1)
+        c18 = torch.cat([c17, c5], dim=1)
         c19 = self.bottom_up_layer_2(c18)
 
         out_feats = [c13, c16, c19] # [P3, P4, P5]
@@ -115,7 +116,7 @@ if __name__ == '__main__':
         'fpn_norm': 'BN',
         'fpn_depthwise': False,
     }
-    fpn_dims = [512, 1024, 1024]
+    fpn_dims = [512, 1024, 512]
     out_dim = 256
     # Head-1
     model = build_fpn(cfg, fpn_dims, out_dim)

+ 0 - 0
models/detectors/rtcdet_v2/rtcdet_v2_pred.py → models/detectors/rtcdet/rtcdet_pred.py


+ 0 - 16
models/detectors/rtcdet_v1/README.md

@@ -1,16 +0,0 @@
-# RTCDet-v1: My First Empirical Study of Real-Time Convolutional Object Detectors.
-
-|   Model    | Scale | Batch | AP<sup>test<br>0.5:0.95 | AP<sup>test<br>0.5 | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|------------|-------|-------|-------------------------|--------------------|------------------------|-------------------|-------------------|--------------------|--------|
-| RTCDetv1-N |  640  | 8xb16 |         35.7            |        53.8        |          35.6          |        53.8       |      9.1          |        2.4         | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/rtcdet_v1_n_coco.pth) |
-| RTCDetv1-T |  640  | 8xb16 |         40.5            |        59.1        |          40.3          |        59.1       |      19.0         |        5.1         | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/rtcdet_v1_t_coco.pth) |
-| RTCDetv1-S |  640  | 8xb16 |         43.6            |        62.6        |          43.3          |        62.6       |      33.6         |        9.0         | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/rtcdet_v1_s_coco.pth) |
-| RTCDetv1-M |  640  | 8xb16 |         48.3            |        67.0        |          48.1          |        66.9       |      87.4         |        23.6        | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/rtcdet_v1_m_coco.pth) |
-| RTCDetv1-L |  640  | 8xb16 |         50.2            |        68.6        |          50.0          |        68.4       |      176.6        |        47.6        | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/rtcdet_v1_l_coco.pth) |
-| RTCDetv1-X |  640  | 8xb12 |                         |                    |                        |                   |                   |                    |  |
-
-- For training, we train my RTCDetv1 series series with 300 epochs on COCO.
-- For data augmentation, we use the large scale jitter (LSJ), Mosaic augmentation and Mixup augmentation, following the setting of [YOLOX](https://github.com/ultralytics/yolov5), but we remove the rotation transformation which is used in YOLOX's strong augmentation.
-- For optimizer, we use AdamW with weight decay 0.05 and base per image lr 0.001 / 64.
-- For learning rate scheduler, we use linear decay scheduler.
-- Due to my limited computing resources, I can not train `RTCDetv1-X` with the setting of `batch size=128`.

+ 0 - 39
models/detectors/rtcdet_v1/build.py

@@ -1,39 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-import torch
-import torch.nn as nn
-
-from .loss import build_criterion
-from .rtcdet_v1 import RTCDet
-
-
-# build object detector
-def build_rtcdet_v1(args, cfg, device, num_classes=80, trainable=False, deploy=False):
-    print('==============================')
-    print('Build {} ...'.format(args.model.upper()))
-        
-    # -------------- Build RTCDet --------------
-    model = RTCDet(
-        cfg=cfg,
-        device=device, 
-        num_classes=num_classes,
-        trainable=trainable,
-        conf_thresh=args.conf_thresh,
-        nms_thresh=args.nms_thresh,
-        topk=args.topk,
-        deploy=deploy
-        )
-
-    # -------------- Initialize RTCDet --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if trainable:
-        # build criterion for training
-        criterion = build_criterion(cfg, device, num_classes)
-    return model, criterion

+ 0 - 212
models/detectors/rtcdet_v1/loss.py

@@ -1,212 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-from utils.box_ops import  bbox2dist, get_ious
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-from .matcher import AlignedSimOTA
-
-
-class Criterion(object):
-    def __init__(self, 
-                 cfg, 
-                 device, 
-                 num_classes=80):
-        self.cfg = cfg
-        self.device = device
-        self.num_classes = num_classes
-        # loss weight
-        self.loss_obj_weight = cfg['loss_obj_weight']
-        self.loss_cls_weight = cfg['loss_cls_weight']
-        self.loss_box_weight = cfg['loss_box_weight']
-        self.loss_dfl_weight = cfg['loss_dfl_weight']
-        # matcher
-        matcher_config = cfg['matcher']
-        self.matcher = AlignedSimOTA(
-            num_classes=num_classes,
-            center_sampling_radius=matcher_config['center_sampling_radius'],
-            topk_candidate=matcher_config['topk_candicate']
-            )
-
-
-    def loss_objectness(self, pred_obj, gt_obj):
-        loss_obj = F.binary_cross_entropy_with_logits(pred_obj, gt_obj, reduction='none')
-
-        return loss_obj
-    
-
-    def loss_classes(self, pred_cls, gt_label):
-        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_label, reduction='none')
-
-        return loss_cls
-
-
-    def loss_bboxes(self, pred_box, gt_box):
-        # regression loss
-        ious = get_ious(pred_box, gt_box, "xyxy", 'giou')
-        loss_box = 1.0 - ious
-
-        return loss_box
-
-
-    def loss_dfl(self, pred_reg, gt_box, anchor, stride):
-        # rescale coords by stride
-        gt_box_s = gt_box / stride
-        anchor_s = anchor / stride
-
-        # compute deltas
-        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.cfg['reg_max'] - 1)
-
-        gt_left = gt_ltrb_s.to(torch.long)
-        gt_right = gt_left + 1
-
-        weight_left = gt_right.to(torch.float) - gt_ltrb_s
-        weight_right = 1 - weight_left
-
-        # loss left
-        loss_left = F.cross_entropy(
-            pred_reg.view(-1, self.cfg['reg_max']),
-            gt_left.view(-1),
-            reduction='none').view(gt_left.shape) * weight_left
-        # loss right
-        loss_right = F.cross_entropy(
-            pred_reg.view(-1, self.cfg['reg_max']),
-            gt_right.view(-1),
-            reduction='none').view(gt_left.shape) * weight_right
-
-        loss_dfl = (loss_left + loss_right).mean(-1, keepdim=True)
-            
-        return loss_dfl
-    
-    
-    def __call__(self, outputs, targets, epoch=0):        
-        """
-            outputs['pred_obj']: List(Tensor) [B, M, 1]
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs = outputs['pred_cls'][0].shape[0]
-        device = outputs['pred_cls'][0].device
-        fpn_strides = outputs['strides']
-        anchors = outputs['anchors']
-        # preds: [B, M, C]
-        obj_preds = torch.cat(outputs['pred_obj'], dim=1)
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-
-        # --------------- label assignment ---------------
-        obj_targets = []
-        cls_targets = []
-        box_targets = []
-        fg_masks = []
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)
-            tgt_bboxes = targets[batch_idx]["boxes"].to(device)
-
-            # check target
-            if len(tgt_labels) == 0 or tgt_bboxes.max().item() == 0.:
-                num_anchors = sum([ab.shape[0] for ab in anchors])
-                # There is no valid gt
-                cls_target = obj_preds.new_zeros((0, self.num_classes))
-                box_target = obj_preds.new_zeros((0, 4))
-                obj_target = obj_preds.new_zeros((num_anchors, 1))
-                fg_mask = obj_preds.new_zeros(num_anchors).bool()
-            else:
-                (
-                    fg_mask,
-                    assigned_labels,
-                    assigned_ious,
-                    assigned_indexs
-                ) = self.matcher(
-                    fpn_strides = fpn_strides,
-                    anchors = anchors,
-                    pred_obj = obj_preds[batch_idx],
-                    pred_cls = cls_preds[batch_idx], 
-                    pred_box = box_preds[batch_idx],
-                    tgt_labels = tgt_labels,
-                    tgt_bboxes = tgt_bboxes
-                    )
-
-                obj_target = fg_mask.unsqueeze(-1)
-                cls_target = F.one_hot(assigned_labels.long(), self.num_classes)
-                cls_target = cls_target * assigned_ious.unsqueeze(-1)
-                box_target = tgt_bboxes[assigned_indexs]
-
-            cls_targets.append(cls_target)
-            box_targets.append(box_target)
-            obj_targets.append(obj_target)
-            fg_masks.append(fg_mask)
-
-        cls_targets = torch.cat(cls_targets, 0)
-        box_targets = torch.cat(box_targets, 0)
-        obj_targets = torch.cat(obj_targets, 0)
-        fg_masks = torch.cat(fg_masks, 0)
-        num_fgs = fg_masks.sum()
-
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # ------------------ Objecntness loss ------------------
-        loss_obj = self.loss_objectness(obj_preds.view(-1, 1), obj_targets.float())
-        loss_obj = loss_obj.sum() / num_fgs
-        
-        # ------------------ Classification loss ------------------
-        cls_preds_pos = cls_preds.view(-1, self.num_classes)[fg_masks]
-        loss_cls = self.loss_classes(cls_preds_pos, cls_targets)
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ Regression loss ------------------
-        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
-        loss_box = self.loss_bboxes(box_preds_pos, box_targets)
-        loss_box = loss_box.sum() / num_fgs
-
-        # ------------------ Distribution focal loss  ------------------
-        ## process anchors
-        anchors = torch.cat(anchors, dim=0)
-        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
-        ## process stride tensors
-        strides = torch.cat(outputs['stride_tensor'], dim=0)
-        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
-        ## fg preds
-        reg_preds_pos = reg_preds.view(-1, 4*self.cfg['reg_max'])[fg_masks]
-        anchors_pos = anchors[fg_masks]
-        strides_pos = strides[fg_masks]
-        ## compute dfl
-        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets, anchors_pos, strides_pos)
-        loss_dfl = loss_dfl.sum() / num_fgs
-
-        # total loss
-        losses = self.loss_obj_weight * loss_obj + \
-                 self.loss_cls_weight * loss_cls + \
-                 self.loss_box_weight * loss_box + \
-                 self.loss_dfl_weight * loss_dfl
-
-        loss_dict = dict(
-                loss_obj = loss_obj,
-                loss_cls = loss_cls,
-                loss_box = loss_box,
-                loss_dfl = loss_dfl,
-                losses = losses
-        )
-
-        return loss_dict
-    
-
-def build_criterion(cfg, device, num_classes):
-    criterion = Criterion(
-        cfg=cfg,
-        device=device,
-        num_classes=num_classes
-        )
-
-    return criterion
-
-
-if __name__ == "__main__":
-    pass

+ 0 - 188
models/detectors/rtcdet_v1/matcher.py

@@ -1,188 +0,0 @@
-# ---------------------------------------------------------------------
-# Copyright (c) Megvii Inc. All rights reserved.
-# ---------------------------------------------------------------------
-
-
-import torch
-import torch.nn.functional as F
-from utils.box_ops import *
-
-
-class AlignedSimOTA(object):
-    """
-        This code referenced to https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/models/yolo_head.py
-    """
-    def __init__(self, num_classes, center_sampling_radius, topk_candidate ):
-        self.num_classes = num_classes
-        self.center_sampling_radius = center_sampling_radius
-        self.topk_candidate = topk_candidate
-
-
-    @torch.no_grad()
-    def __call__(self, 
-                 fpn_strides, 
-                 anchors, 
-                 pred_obj, 
-                 pred_cls, 
-                 pred_box, 
-                 tgt_labels,
-                 tgt_bboxes):
-        # [M,]
-        strides_tensor = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
-                                for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
-        # List[F, M, 2] -> [M, 2]
-        anchors = torch.cat(anchors, dim=0)
-        num_anchor = anchors.shape[0]        
-        num_gt = len(tgt_labels)
-
-        # ----------------------- Find inside points -----------------------
-        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
-            tgt_bboxes, anchors, strides_tensor, num_anchor, num_gt)
-        obj_preds = pred_obj[fg_mask].float()   # [Mp, 1]
-        cls_preds = pred_cls[fg_mask].float()   # [Mp, C]
-        box_preds = pred_box[fg_mask].float()   # [Mp, 4]
-
-        # ----------------------- Reg cost -----------------------
-        pair_wise_ious, _ = box_iou(tgt_bboxes, box_preds)      # [N, Mp]
-        reg_cost = -torch.log(pair_wise_ious + 1e-8)            # [N, Mp]
-
-        # ----------------------- Cls cost -----------------------
-        with torch.cuda.amp.autocast(enabled=False):
-            # [Mp, C]
-            score_preds = torch.sqrt(obj_preds.sigmoid_()* cls_preds.sigmoid_())
-            # [N, Mp, C]
-            score_preds = score_preds.unsqueeze(0).repeat(num_gt, 1, 1)
-            # prepare cls_target
-            cls_targets = F.one_hot(tgt_labels.long(), self.num_classes).float()
-            cls_targets = cls_targets.unsqueeze(1).repeat(1, score_preds.size(1), 1)
-            cls_targets *= pair_wise_ious.unsqueeze(-1)  # iou-aware
-            # [N, Mp]
-            cls_cost = F.binary_cross_entropy(score_preds, cls_targets, reduction="none").sum(-1)
-        del score_preds
-
-        #----------------------- Dynamic K-Matching -----------------------
-        cost_matrix = (
-            cls_cost
-            + 3.0 * reg_cost
-            + 100000.0 * (~is_in_boxes_and_center)
-        ) # [N, Mp]
-
-        (
-            assigned_labels,         # [num_fg,]
-            assigned_ious,           # [num_fg,]
-            assigned_indexs,         # [num_fg,]
-        ) = self.dynamic_k_matching(
-            cost_matrix,
-            pair_wise_ious,
-            tgt_labels,
-            num_gt,
-            fg_mask
-            )
-        del cls_cost, cost_matrix, pair_wise_ious, reg_cost
-
-        return fg_mask, assigned_labels, assigned_ious, assigned_indexs
-
-
-    def get_in_boxes_info(
-        self,
-        gt_bboxes,   # [N, 4]
-        anchors,     # [M, 2]
-        strides,     # [M,]
-        num_anchors, # M
-        num_gt,      # N
-        ):
-        # anchor center
-        x_centers = anchors[:, 0]
-        y_centers = anchors[:, 1]
-
-        # [M,] -> [1, M] -> [N, M]
-        x_centers = x_centers.unsqueeze(0).repeat(num_gt, 1)
-        y_centers = y_centers.unsqueeze(0).repeat(num_gt, 1)
-
-        # [N,] -> [N, 1] -> [N, M]
-        gt_bboxes_l = gt_bboxes[:, 0].unsqueeze(1).repeat(1, num_anchors) # x1
-        gt_bboxes_t = gt_bboxes[:, 1].unsqueeze(1).repeat(1, num_anchors) # y1
-        gt_bboxes_r = gt_bboxes[:, 2].unsqueeze(1).repeat(1, num_anchors) # x2
-        gt_bboxes_b = gt_bboxes[:, 3].unsqueeze(1).repeat(1, num_anchors) # y2
-
-        b_l = x_centers - gt_bboxes_l
-        b_r = gt_bboxes_r - x_centers
-        b_t = y_centers - gt_bboxes_t
-        b_b = gt_bboxes_b - y_centers
-        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
-
-        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
-        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
-        # in fixed center
-        center_radius = self.center_sampling_radius
-
-        # [N, 2]
-        gt_centers = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) * 0.5
-        
-        # [1, M]
-        center_radius_ = center_radius * strides.unsqueeze(0)
-
-        gt_bboxes_l = gt_centers[:, 0].unsqueeze(1).repeat(1, num_anchors) - center_radius_ # x1
-        gt_bboxes_t = gt_centers[:, 1].unsqueeze(1).repeat(1, num_anchors) - center_radius_ # y1
-        gt_bboxes_r = gt_centers[:, 0].unsqueeze(1).repeat(1, num_anchors) + center_radius_ # x2
-        gt_bboxes_b = gt_centers[:, 1].unsqueeze(1).repeat(1, num_anchors) + center_radius_ # y2
-
-        c_l = x_centers - gt_bboxes_l
-        c_r = gt_bboxes_r - x_centers
-        c_t = y_centers - gt_bboxes_t
-        c_b = gt_bboxes_b - y_centers
-        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
-        is_in_centers = center_deltas.min(dim=-1).values > 0.0
-        is_in_centers_all = is_in_centers.sum(dim=0) > 0
-
-        # in boxes and in centers
-        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
-
-        is_in_boxes_and_center = (
-            is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]
-        )
-        return is_in_boxes_anchor, is_in_boxes_and_center
-    
-    
-    def dynamic_k_matching(
-        self, 
-        cost, 
-        pair_wise_ious, 
-        gt_classes, 
-        num_gt, 
-        fg_mask
-        ):
-        # Dynamic K
-        # ---------------------------------------------------------------
-        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
-
-        ious_in_boxes_matrix = pair_wise_ious
-        n_candidate_k = min(self.topk_candidate, ious_in_boxes_matrix.size(1))
-        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
-        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
-        dynamic_ks = dynamic_ks.tolist()
-        for gt_idx in range(num_gt):
-            _, pos_idx = torch.topk(
-                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False
-            )
-            matching_matrix[gt_idx][pos_idx] = 1
-
-        del topk_ious, dynamic_ks, pos_idx
-
-        anchor_matching_gt = matching_matrix.sum(0)
-        if (anchor_matching_gt > 1).sum() > 0:
-            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
-            matching_matrix[:, anchor_matching_gt > 1] *= 0
-            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
-        fg_mask_inboxes = matching_matrix.sum(0) > 0
-
-        fg_mask[fg_mask.clone()] = fg_mask_inboxes
-
-        assigned_indexs = matching_matrix[:, fg_mask_inboxes].argmax(0)
-        assigned_labels = gt_classes[assigned_indexs]
-
-        assigned_ious = (matching_matrix * pair_wise_ious).sum(0)[
-            fg_mask_inboxes
-        ]
-        return assigned_labels, assigned_ious, assigned_indexs
-    
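For readers following the removed SimOTA matcher above, here is a minimal sketch (toy numbers, not repo code) of how `dynamic_ks` falls out of the pairwise IoUs in `dynamic_k_matching`: each ground truth keeps roughly as many candidates as its summed top-k IoU.

```python
import torch

# toy IoU matrix: [num_gt=2, num_candidates=4]
pair_wise_ious = torch.tensor([[0.82, 0.55, 0.10, 0.05],
                               [0.20, 0.60, 0.70, 0.65]])
topk_candidate = 3
n_candidate_k = min(topk_candidate, pair_wise_ious.size(1))
topk_ious, _ = torch.topk(pair_wise_ious, n_candidate_k, dim=1)
dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
print(dynamic_ks.tolist())  # [1, 1] -> each GT keeps its single lowest-cost candidate
```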

+ 0 - 181
models/detectors/rtcdet_v1/rtcdet_v1.py

@@ -1,181 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-from .rtcdet_v1_backbone import build_backbone
-from .rtcdet_v1_neck import build_neck
-from .rtcdet_v1_pafpn import build_fpn
-from .rtcdet_v1_head import build_det_head
-from .rtcdet_v1_pred import build_pred_layer
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# My RTCDet
-class RTCDet(nn.Module):
-    def __init__(self, 
-                 cfg,
-                 device, 
-                 num_classes = 20, 
-                 conf_thresh = 0.05,
-                 nms_thresh = 0.6,
-                 trainable = False, 
-                 topk = 1000,
-                 deploy = False):
-        super(RTCDet, self).__init__()
-        # ---------------------- Basic Parameters ----------------------
-        self.cfg = cfg
-        self.device = device
-        self.stride = cfg['stride']
-        self.reg_max = cfg['reg_max']
-        self.num_classes = num_classes
-        self.trainable = trainable
-        self.conf_thresh = conf_thresh
-        self.nms_thresh = nms_thresh
-        self.topk = topk
-        self.deploy = deploy
-        self.head_dim = round(256*cfg['width'])
-        
-        # ---------------------- Network Parameters ----------------------
-        ## ----------- Backbone -----------
-        self.backbone, feats_dim = build_backbone(cfg, trainable&cfg['pretrained'])
-
-        ## ----------- Neck: SPP -----------
-        self.neck = build_neck(cfg, feats_dim[-1], feats_dim[-1])
-        feats_dim[-1] = self.neck.out_dim
-        
-        ## ----------- Neck: FPN -----------
-        self.fpn = build_fpn(cfg, feats_dim, round(256*cfg['width']), True)
-        self.fpn_dims = self.fpn.out_dim
-
-        ## ----------- Heads -----------
-        self.det_heads = build_det_head(
-            cfg, self.fpn_dims, self.head_dim, num_classes, num_levels=len(self.stride))
-
-        ## ----------- Preds -----------
-        self.pred_layers = build_pred_layer(
-            self.det_heads.cls_head_dim, self.det_heads.reg_head_dim,
-            self.stride, num_classes, num_coords=4, num_levels=len(self.stride))
-
-
-    ## post-process
-    def post_process(self, obj_preds, cls_preds, box_preds):
-        """
-        Input:
-            obj_preds: List(Tensor) [[H x W, 1], ...]
-            cls_preds: List(Tensor) [[H x W, C], ...]
-            box_preds: List(Tensor) [[H x W, 4], ...]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for obj_pred_i, cls_pred_i, box_pred_i in zip(obj_preds, cls_preds, box_preds):
-            obj_pred_i = obj_pred_i[0]
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            
-            # (H x W x C,)
-            scores_i = (torch.sqrt(obj_pred_i.sigmoid() * cls_pred_i.sigmoid())).flatten()
-
-            # Keep top k top scoring indices only.
-            num_topk = min(self.topk, box_pred_i.size(0))
-
-            # torch.sort is actually faster than .topk (at least on GPUs)
-            predicted_prob, topk_idxs = scores_i.sort(descending=True)
-            topk_scores = predicted_prob[:num_topk]
-            topk_idxs = topk_idxs[:num_topk]
-
-            # filter out the proposals with low confidence score
-            keep_idxs = topk_scores > self.conf_thresh
-            scores = topk_scores[keep_idxs]
-            topk_idxs = topk_idxs[keep_idxs]
-
-            anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-            labels = topk_idxs % self.num_classes
-
-            bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores)
-        labels = torch.cat(all_labels)
-        bboxes = torch.cat(all_bboxes)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes, False)
-
-        return bboxes, scores, labels
-
-
-    # ---------------------- Main Process for Inference ----------------------
-    @torch.no_grad()
-    def inference_single_image(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Heads ----------------
-        cls_feats, reg_feats = self.det_heads(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.pred_layers(cls_feats, reg_feats)
-
-        all_obj_preds = outputs['pred_obj']
-        all_cls_preds = outputs['pred_cls']
-        all_box_preds = outputs['pred_box']
-
-        if self.deploy:
-            obj_preds = torch.cat(all_obj_preds, dim=1)[0]
-            cls_preds = torch.cat(all_cls_preds, dim=1)[0]
-            box_preds = torch.cat(all_box_preds, dim=1)[0]
-            scores = torch.sqrt(obj_preds.sigmoid() * cls_preds.sigmoid())
-            bboxes = box_preds
-            # [n_anchors_all, 4 + C]
-            outputs = torch.cat([bboxes, scores], dim=-1)
-
-            return outputs
-        else:
-            # post process
-            bboxes, scores, labels = self.post_process(
-                all_obj_preds, all_cls_preds, all_box_preds)
-        
-            return bboxes, scores, labels
-
-
-    def forward(self, x):
-        if not self.trainable:
-            return self.inference_single_image(x)
-        else:
-            # ---------------- Backbone ----------------
-            pyramid_feats = self.backbone(x)
-
-            # ---------------- Neck: SPP ----------------
-            pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-            # ---------------- Neck: PaFPN ----------------
-            pyramid_feats = self.fpn(pyramid_feats)
-
-            # ---------------- Heads ----------------
-            cls_feats, reg_feats = self.det_heads(pyramid_feats)
-
-            # ---------------- Preds ----------------
-            outputs = self.pred_layers(cls_feats, reg_feats)
-            
-            return outputs 
-        
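A small sketch (toy shapes, assumed values) of the index arithmetic used in `post_process` above: scores are flattened over anchors and classes, so the anchor index and the class label are recovered with floor-division and modulo.

```python
import torch

num_classes, num_anchors = 3, 5
scores = torch.rand(num_anchors, num_classes).flatten()    # flattened [M x C] scores
topk_scores, topk_idxs = scores.sort(descending=True)
anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')  # which anchor
labels = topk_idxs % num_classes                                        # which class
assert int(anchor_idxs.max()) < num_anchors and int(labels.max()) < num_classes
```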

+ 0 - 154
models/detectors/rtcdet_v1/rtcdet_v1_backbone.py

@@ -1,154 +0,0 @@
-import torch
-import torch.nn as nn
-try:
-    from .rtcdet_v1_basic import Conv, ELANBlock, DownSample
-except:
-    from rtcdet_v1_basic import Conv, ELANBlock, DownSample
-
-
-
-model_urls = {
-    'elannet_nano': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_nano.pth",
-    'elannet_tiny': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_tiny.pth",
-    'elannet_small': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_small.pth",
-    'elannet_medium': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_medium.pth",
-    'elannet_large': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_large.pth",
-    'elannet_huge': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_huge.pth",
-}
-
-
-# ---------------------------- Backbones ----------------------------
-# ELANNet-P5
-class ELANNet(nn.Module):
-    def __init__(self, width=1.0, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELANNet, self).__init__()
-        self.feat_dims = [int(512 * width), int(1024 * width), int(1024 * width)]
-        
-        # P1/2
-        self.layer_1 = nn.Sequential(
-            Conv(3, int(64*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            Conv(int(64*width), int(64*width), k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # P2/4
-        self.layer_2 = nn.Sequential(   
-            Conv(int(64*width), int(128*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),             
-            ELANBlock(in_dim=int(128*width), out_dim=int(256*width), expand_ratio=0.5, depth=depth,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # P3/8
-        self.layer_3 = nn.Sequential(
-            DownSample(in_dim=int(256*width), out_dim=int(256*width), act_type=act_type, norm_type=norm_type),             
-            ELANBlock(in_dim=int(256*width), out_dim=int(512*width), expand_ratio=0.5, depth=depth,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # P4/16
-        self.layer_4 = nn.Sequential(
-            DownSample(in_dim=int(512*width), out_dim=int(512*width), act_type=act_type, norm_type=norm_type),             
-            ELANBlock(in_dim=int(512*width), out_dim=int(1024*width), expand_ratio=0.5, depth=depth,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # P5/32
-        self.layer_5 = nn.Sequential(
-            DownSample(in_dim=int(1024*width), out_dim=int(1024*width), act_type=act_type, norm_type=norm_type),             
-            ELANBlock(in_dim=int(1024*width), out_dim=int(1024*width), expand_ratio=0.25, depth=depth,
-                    act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-# ---------------------------- Functions ----------------------------
-## load pretrained weight
-def load_weight(model, model_name):
-    # load weight
-    print('Loading pretrained weight ...')
-    url = model_urls[model_name]
-    if url is not None:
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print(k)
-
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No pretrained for {}'.format(model_name))
-
-    return model
-
-
-## build ELAN-Net
-def build_backbone(cfg, pretrained=False): 
-    # model
-    backbone = ELANNet(
-        width=cfg['width'],
-        depth=cfg['depth'],
-        act_type=cfg['bk_act'],
-        norm_type=cfg['bk_norm'],
-        depthwise=cfg['bk_dpw']
-        )
-    # check whether to load imagenet pretrained weight
-    if pretrained:
-        if cfg['width'] == 0.25 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_nano')
-        elif cfg['width'] == 0.375 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_tiny')
-        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_small')
-        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
-            backbone = load_weight(backbone, model_name='elannet_medium')
-        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
-            backbone = load_weight(backbone, model_name='elannet_large')
-        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
-            backbone = load_weight(backbone, model_name='elannet_huge')
-    feat_dims = backbone.feat_dims
-
-    return backbone, feat_dims
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.0,
-        'depth': 1.0,
-    }
-    model, feats = build_backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))
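As a rough illustration (an assumption based on the `ELANNet` constructor and the pretrained-weight mapping in `build_backbone` above, not repo output), the width multipliers scale the backbone output dims as follows:

```python
# width multipliers assumed from build_backbone's pretrained-weight mapping
variants = {'nano': 0.25, 'tiny': 0.375, 'small': 0.5, 'medium': 0.75, 'large': 1.0, 'huge': 1.25}
for name, w in variants.items():
    # backbone output dims (C3, C4, C5), mirroring ELANNet.feat_dims
    print(name, [int(c * w) for c in (512, 1024, 1024)])
```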

+ 0 - 184
models/detectors/rtcdet_v1/rtcdet_v1_basic.py

@@ -1,184 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-
-
-# ---------------------------- 2D CNN ----------------------------
-class SiLU(nn.Module):
-    """export-friendly version of nn.SiLU()"""
-
-    @staticmethod
-    def forward(x):
-        return x * torch.sigmoid(x)
-
-
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-
-
-# Basic conv layer
-class Conv(nn.Module):
-    def __init__(self, 
-                 c1,                   # in channels
-                 c2,                   # out channels 
-                 k=1,                  # kernel size 
-                 p=0,                  # padding
-                 s=1,                  # stride
-                 d=1,                  # dilation
-                 act_type='lrelu',     # activation
-                 norm_type='BN',       # normalization
-                 depthwise=False):
-        super(Conv, self).__init__()
-        convs = []
-        add_bias = False if norm_type else True
-        p = p if d == 1 else d
-        if depthwise:
-            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
-            # depthwise conv
-            if norm_type:
-                convs.append(get_norm(norm_type, c1))
-            if act_type:
-                convs.append(get_activation(act_type))
-            # pointwise conv
-            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-
-        else:
-            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-            
-        self.convs = nn.Sequential(*convs)
-
-
-    def forward(self, x):
-        return self.convs(x)
-
-
-# ---------------------------- Modified YOLOv7's Modules ----------------------------
-## ELANBlock for Backbone
-class ELANBlock(nn.Module):
-    def __init__(self, in_dim, out_dim, expand_ratio=0.5, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELANBlock, self).__init__()
-        if isinstance(expand_ratio, float):
-            inter_dim = int(in_dim * expand_ratio)
-            inter_dim2 = inter_dim
-        elif isinstance(expand_ratio, list):
-            assert len(expand_ratio) == 2
-            e1, e2 = expand_ratio
-            inter_dim = int(in_dim * e1)
-            inter_dim2 = int(inter_dim * e2)
-        # branch-1
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        # branch-2
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        # branch-3
-        for idx in range(round(3*depth)):
-            if idx == 0:
-                cv3 = [Conv(inter_dim, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
-            else:
-                cv3.append(Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-        self.cv3 = nn.Sequential(*cv3)
-        # branch-4
-        self.cv4 = nn.Sequential(*[
-            Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(round(3*depth))
-        ])
-        # output
-        self.out = Conv(inter_dim*2 + inter_dim2*2, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-
-    def forward(self, x):
-        """
-        Input:
-            x: [B, C_in, H, W]
-        Output:
-            out: [B, C_out, H, W]
-        """
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.cv3(x2)
-        x4 = self.cv4(x3)
-
-        # concatenate the four branches: [B, 2*inter_dim + 2*inter_dim2, H, W] -> fuse to [B, C_out, H, W]
-        out = self.out(torch.cat([x1, x2, x3, x4], dim=1))
-
-        return out
-
-## DownSample
-class DownSample(nn.Module):
-    def __init__(self, in_dim, out_dim, act_type='silu', norm_type='BN', depthwise=False):
-        super().__init__()
-        inter_dim = out_dim // 2
-        self.mp = nn.MaxPool2d((2, 2), 2)
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        self.cv2 = nn.Sequential(
-            Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type),
-            Conv(inter_dim, inter_dim, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-    def forward(self, x):
-        x1 = self.cv1(self.mp(x))
-        x2 = self.cv2(x)
-        out = torch.cat([x1, x2], dim=1)
-
-        return out
-
-
-# ---------------------------- FPN Modules ----------------------------
-## build fpn's core block
-def build_fpn_block(cfg, in_dim, out_dim):
-    if cfg['fpn_core_block'] == 'elanblock':
-        layer = ELANBlock(in_dim=in_dim,
-                          out_dim=out_dim,
-                          expand_ratio=[0.5, 0.5],
-                          depth=cfg['depth'],
-                          act_type=cfg['fpn_act'],
-                          norm_type=cfg['fpn_norm'],
-                          depthwise=cfg['fpn_depthwise']
-                          )
-        
-    return layer
-
-## build fpn's reduce layer
-def build_reduce_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_reduce_layer'] == 'conv':
-        layer = Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer
-
-## build fpn's downsample layer
-def build_downsample_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_downsample_layer'] == 'conv':
-        layer = Conv(in_dim, out_dim, k=3, s=2, p=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-    elif cfg['fpn_downsample_layer'] == 'dsblock':
-        layer = DownSample(in_dim, out_dim, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer
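A short sketch (assumed dims, structure only, without the norm/activation layers that `Conv` inserts) of why the depthwise path in `Conv(depthwise=True)` is cheaper than a standard 3x3 convolution:

```python
import torch.nn as nn

c1, c2, k = 64, 128, 3
standard  = nn.Conv2d(c1, c2, k, padding=1, bias=False)
separable = nn.Sequential(
    nn.Conv2d(c1, c1, k, padding=1, groups=c1, bias=False),  # depthwise: per-channel spatial conv
    nn.Conv2d(c1, c2, 1, bias=False),                        # pointwise: 1x1 channel mixing
)
count = lambda m: sum(p.numel() for p in m.parameters())
print(count(standard), count(separable))  # 73728 vs 8768 parameters
```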

+ 0 - 117
models/detectors/rtcdet_v1/rtcdet_v1_head.py

@@ -1,117 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .rtcdet_v1_basic import Conv
-
-
-# Single-level Head
-class SingleLevelHead(nn.Module):
-    def __init__(self, in_dim, out_dim, num_classes, num_cls_head, num_reg_head, act_type, norm_type, depthwise):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_classes = num_classes
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.depthwise = depthwise
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = max(out_dim, num_classes)
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    Conv(in_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-            else:
-                cls_feats.append(
-                    Conv(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                        act_type=act_type,
-                        norm_type=norm_type,
-                        depthwise=depthwise)
-                        )      
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = out_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    Conv(in_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-            else:
-                reg_feats.append(
-                    Conv(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-
-    def forward(self, x):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-
-# Multi-level Head
-class MultiLevelHead(nn.Module):
-    def __init__(self, cfg, in_dims, out_dim, num_classes=80, num_levels=3):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [SingleLevelHead(
-                in_dims[level],
-                out_dim,
-                num_classes,
-                cfg['num_cls_head'],
-                cfg['num_reg_head'],
-                cfg['head_act'],
-                cfg['head_norm'],
-                cfg['head_depthwise'])
-                for level in range(num_levels)
-            ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.num_classes = num_classes
-
-        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
-        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
-
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        return cls_feats, reg_feats
-    
-
-# build detection head
-def build_det_head(cfg, in_dim, out_dim, num_classes=80, num_levels=3):
-    if cfg['head'] == 'decoupled_head':
-        head = MultiLevelHead(cfg, in_dim, out_dim, num_classes, num_levels) 
-
-    return head

+ 0 - 72
models/detectors/rtcdet_v1/rtcdet_v1_neck.py

@@ -1,72 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .rtcdet_v1_basic import Conv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code references https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio=0.5):
-        super().__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.MaxPool2d(kernel_size=cfg['pooling_size'], stride=1, padding=cfg['pooling_size'] // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-
-# SPPF block with CSP module
-class SPPFBlockCSP(nn.Module):
-    """
-        CSP Spatial Pyramid Pooling Block
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio):
-        super(SPPFBlockCSP, self).__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.Sequential(
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise']),
-            SPPF(cfg, inter_dim, inter_dim, expand_ratio=1.0),
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise'])
-        )
-        self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-
-        
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.m(x2)
-        y = self.cv3(torch.cat([x1, x3], dim=1))
-
-        return y
-
-
-# build neck
-def build_neck(cfg, in_dim, out_dim):
-    model = cfg['neck']
-    print('==============================')
-    print('Neck: {}'.format(model))
-    # build neck
-    if model == 'sppf':
-        neck = SPPF(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-    elif model == 'csp_sppf':
-        neck = SPPFBlockCSP(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-
-    return neck
-        
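A quick check (illustrative only, not repo code) of the SPPF design used above: chaining the same 5x5 max-pool three times reproduces the 5/9/13 pooling of the original SPP block.

```python
import torch
import torch.nn as nn

x = torch.randn(1, 8, 32, 32)
m5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
y1, y2, y3 = m5(x), m5(m5(x)), m5(m5(m5(x)))
spp = [nn.MaxPool2d(k, stride=1, padding=k // 2)(x) for k in (5, 9, 13)]
print(torch.equal(y1, spp[0]), torch.equal(y2, spp[1]), torch.equal(y3, spp[2]))  # True True True
```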

+ 0 - 98
models/detectors/rtcdet_v1/rtcdet_v1_pafpn.py

@@ -1,98 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .rtcdet_v1_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
-
-
-# RTCDet-Style PaFPN
-class RTCDetPaFPN(nn.Module):
-    def __init__(self, cfg, in_dims=[512, 1024, 1024], out_dim=None, input_proj=False):
-        super(RTCDetPaFPN, self).__init__()
-        # --------------------------- Basic Parameters ---------------------------
-        self.in_dims = in_dims
-        if input_proj:
-            self.fpn_dims = [round(256*cfg['width']), round(512*cfg['width']), round(1024*cfg['width'])]
-        else:
-            self.fpn_dims = in_dims
-
-        # --------------------------- Input proj ---------------------------
-        self.input_projs = nn.ModuleList([nn.Conv2d(in_dim, fpn_dim, kernel_size=1)
-                                          for in_dim, fpn_dim in zip(in_dims, self.fpn_dims)])
-        
-        # --------------------------- Top-down FPN---------------------------
-        ## P5 -> P4
-        self.reduce_layer_1 = build_reduce_layer(cfg, self.fpn_dims[2], self.fpn_dims[2]//2)
-        self.top_down_layer_1 = build_fpn_block(cfg, self.fpn_dims[1] + self.fpn_dims[2]//2, self.fpn_dims[1])
-
-        ## P4 -> P3
-        self.reduce_layer_2 = build_reduce_layer(cfg, self.fpn_dims[1], self.fpn_dims[1]//2)
-        self.top_down_layer_2 = build_fpn_block(cfg, self.fpn_dims[0] + self.fpn_dims[1]//2, self.fpn_dims[0])
-
-        # --------------------------- Bottom-up FPN ---------------------------
-        ## P3 -> P4
-        self.downsample_layer_1 = build_downsample_layer(cfg, self.fpn_dims[0], self.fpn_dims[0])
-        self.bottom_up_layer_1 = build_fpn_block(cfg, self.fpn_dims[0] + self.fpn_dims[1]//2, self.fpn_dims[1])
-
-        ## P4 -> P5
-        self.downsample_layer_2 = build_downsample_layer(cfg, self.fpn_dims[1], self.fpn_dims[1])
-        self.bottom_up_layer_2 = build_fpn_block(cfg, self.fpn_dims[1] + self.fpn_dims[2]//2, self.fpn_dims[2])
-                
-        # --------------------------- Output proj ---------------------------
-        if out_dim is not None:
-            self.out_layers = nn.ModuleList([
-                Conv(in_dim, out_dim, k=1,
-                     act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-                     for in_dim in self.fpn_dims
-                     ])
-            self.out_dim = [out_dim] * 3
-        else:
-            self.out_layers = None
-            self.out_dim = self.fpn_dims
-
-
-    def forward(self, fpn_feats):
-        fpn_feats = [layer(feat) for feat, layer in zip(fpn_feats, self.input_projs)]
-        c3, c4, c5 = fpn_feats
-
-        # Top down
-        ## P5 -> P4
-        c6 = self.reduce_layer_1(c5)
-        c7 = F.interpolate(c6, scale_factor=2.0)
-        c8 = torch.cat([c7, c4], dim=1)
-        c9 = self.top_down_layer_1(c8)
-        ## P4 -> P3
-        c10 = self.reduce_layer_2(c9)
-        c11 = F.interpolate(c10, scale_factor=2.0)
-        c12 = torch.cat([c11, c3], dim=1)
-        c13 = self.top_down_layer_2(c12)
-
-        # Bottom up
-        ## p3 -> P4
-        c14 = self.downsample_layer_1(c13)
-        c15 = torch.cat([c14, c10], dim=1)
-        c16 = self.bottom_up_layer_1(c15)
-        ## P4 -> P5
-        c17 = self.downsample_layer_2(c16)
-        c18 = torch.cat([c17, c6], dim=1)
-        c19 = self.bottom_up_layer_2(c18)
-
-        out_feats = [c13, c16, c19] # [P3, P4, P5]
-        
-        # output proj layers
-        if self.out_layers is not None:
-            out_feats_proj = []
-            for feat, layer in zip(out_feats, self.out_layers):
-                out_feats_proj.append(layer(feat))
-            return out_feats_proj
-
-        return out_feats
-
-
-def build_fpn(cfg, in_dims, out_dim=None, input_proj=False):
-    model = cfg['fpn']
-    # build pafpn
-    if model == 'rtcdet_pafpn':
-        fpn_net = RTCDetPaFPN(cfg, in_dims, out_dim, input_proj)
-
-    return fpn_net
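An illustrative shape walk-through (toy channel dims, a plain max-pool standing in for the downsample layer, omitting the reduce/ELAN blocks) of the top-down / bottom-up fusion pattern in `RTCDetPaFPN`:

```python
import torch
import torch.nn.functional as F

c3 = torch.randn(1, 64, 80, 80)    # P3, stride 8
c4 = torch.randn(1, 128, 40, 40)   # P4, stride 16
c5 = torch.randn(1, 256, 20, 20)   # P5, stride 32

# top-down: upsample the deeper map, concatenate with the shallower one
p4_td = torch.cat([F.interpolate(c5, scale_factor=2.0), c4], dim=1)   # [1, 384, 40, 40]
p3_td = torch.cat([F.interpolate(c4, scale_factor=2.0), c3], dim=1)   # [1, 192, 80, 80]

# bottom-up: stride-2 downsample, concatenate with the deeper map
p4_bu = torch.cat([F.max_pool2d(c3, 2), c4], dim=1)                   # [1, 192, 40, 40]
print(p4_td.shape, p3_td.shape, p4_bu.shape)
```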

+ 0 - 164
models/detectors/rtcdet_v1/rtcdet_v1_pred.py

@@ -1,164 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-# Single-level pred layer
-class SingleLevelPredLayer(nn.Module):
-    def __init__(self, cls_dim, reg_dim, num_classes, num_coords=4):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-
-        # --------- Network Parameters ----------
-        self.obj_pred = nn.Conv2d(reg_dim, 1, kernel_size=1)
-        self.cls_pred = nn.Conv2d(cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(reg_dim, num_coords, kernel_size=1)                
-
-        self.init_bias()
-        
-
-    def init_bias(self):
-        # Init bias
-        init_prob = 0.01
-        bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
-        # obj pred
-        b = self.obj_pred.bias.view(1, -1)
-        b.data.fill_(bias_value.item())
-        self.obj_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # cls pred
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(bias_value.item())
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        w = self.reg_pred.weight
-        w.data.fill_(0.)
-        self.reg_pred.weight = torch.nn.Parameter(w, requires_grad=True)
-
-
-    def forward(self, cls_feat, reg_feat):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        obj_pred = self.obj_pred(reg_feat)
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        return obj_pred, cls_pred, reg_pred
-    
-
-# Multi-level pred layer
-class MultiLevelPredLayer(nn.Module):
-    def __init__(self, cls_dim, reg_dim, strides, num_classes, num_coords=4, num_levels=3, reg_max=16):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.strides = strides
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-        self.num_levels = num_levels
-        self.reg_max = reg_max
-
-        # ----------- Network Parameters -----------
-        ## pred layers
-        self.multi_level_preds = nn.ModuleList(
-            [SingleLevelPredLayer(
-                cls_dim,
-                reg_dim,
-                num_classes,
-                num_coords * self.reg_max)
-                for _ in range(num_levels)
-            ])
-        ## proj conv
-        self.proj = nn.Parameter(torch.linspace(0, reg_max, reg_max), requires_grad=False)
-        self.proj_conv = nn.Conv2d(self.reg_max, 1, kernel_size=1, bias=False)
-        self.proj_conv.weight = nn.Parameter(self.proj.view([1, reg_max, 1, 1]).clone().detach(), requires_grad=False)
-
-
-    def generate_anchors(self, level, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        # generate grid cells
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors += 0.5  # add center offset
-        anchors *= self.strides[level]
-
-        return anchors
-        
-
-    def forward(self, cls_feats, reg_feats):
-        all_anchors = []
-        all_strides = []
-        all_obj_preds = []
-        all_cls_preds = []
-        all_reg_preds = []
-        all_box_preds = []
-        for level in range(self.num_levels):
-            # pred
-            obj_pred, cls_pred, reg_pred = self.multi_level_preds[level](
-                cls_feats[level], reg_feats[level])
-
-            # generate anchor boxes: [M, 4]
-            B, _, H, W = cls_pred.size()
-            fmp_size = [H, W]
-            anchors = self.generate_anchors(level, fmp_size)
-            anchors = anchors.to(cls_pred.device)
-            # stride tensor: [M, 1]
-            stride_tensor = torch.ones_like(anchors[..., :1]) * self.strides[level]
-            
-            # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
-            obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 1)
-            cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-            reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4*self.reg_max)
-
-            # ----------------------- Decode bbox -----------------------
-            B, M = reg_pred.shape[:2]
-            # [B, M, 4*reg_max] -> [B, M, 4, reg_max]
-            reg_pred_ = reg_pred.reshape([B, M, 4, self.reg_max])
-            # [B, M, 4, reg_max] -> [B, reg_max, 4, M]
-            reg_pred_ = reg_pred_.permute(0, 3, 2, 1).contiguous()
-            # [B, reg_max, 4, M] -> [B, 1, 4, M]
-            reg_pred_ = self.proj_conv(F.softmax(reg_pred_, dim=1))
-            # [B, 1, 4, M] -> [B, 4, M] -> [B, M, 4]
-            reg_pred_ = reg_pred_.view(B, 4, M).permute(0, 2, 1).contiguous()
-            ## tlbr -> xyxy
-            x1y1_pred = anchors[None] - reg_pred_[..., :2] * self.strides[level]
-            x2y2_pred = anchors[None] + reg_pred_[..., 2:] * self.strides[level]
-            box_pred = torch.cat([x1y1_pred, x2y2_pred], dim=-1)
-
-            all_obj_preds.append(obj_pred)
-            all_cls_preds.append(cls_pred)
-            all_reg_preds.append(reg_pred)
-            all_box_preds.append(box_pred)
-            all_anchors.append(anchors)
-            all_strides.append(stride_tensor)
-        
-        # output dict
-        outputs = {"pred_obj": all_obj_preds,        # List(Tensor) [B, M, 1]
-                   "pred_cls": all_cls_preds,        # List(Tensor) [B, M, C]
-                   "pred_reg": all_reg_preds,        # List(Tensor) [B, M, 4*(reg_max)]
-                   "pred_box": all_box_preds,        # List(Tensor) [B, M, 4]
-                   "anchors": all_anchors,           # List(Tensor) [M, 2]
-                   "strides": self.strides,          # List(Int) = [8, 16, 32]
-                   "stride_tensor": all_strides      # List(Tensor) [M, 1]
-                   }
-
-        return outputs
-    
-
-# build detection head
-def build_pred_layer(cls_dim, reg_dim, strides, num_classes, num_coords=4, num_levels=3):
-    pred_layers = MultiLevelPredLayer(cls_dim, reg_dim, strides, num_classes, num_coords, num_levels) 
-
-    return pred_layers
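A minimal sketch (assumed values, not repo code) of what `proj_conv` computes in `MultiLevelPredLayer`: the expectation of a softmax distribution over `reg_max` discrete bins, one distribution per box side.

```python
import torch
import torch.nn.functional as F

reg_max = 16
logits = torch.randn(4, reg_max)                  # one distribution per box side (l, t, r, b)
probs = F.softmax(logits, dim=-1)                 # [4, reg_max]
bins = torch.linspace(0, reg_max, reg_max)        # same projection vector as self.proj above
distances = (probs * bins).sum(dim=-1)            # expected offset per side, in grid units
print(distances)                                  # later scaled by the level stride when decoding
```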

+ 0 - 23
models/detectors/rtcdet_v2/README.md

@@ -1,23 +0,0 @@
-# RTCDet-v2: My Second Empirical Study of Real-Time Convolutional Object Detectors.
-
-|   Model    | Scale | Batch | AP<sup>test<br>0.5:0.95 | AP<sup>test<br>0.5 | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|------------|-------|-------|-------------------------|--------------------|------------------------|-------------------|-------------------|--------------------|--------|
-| RTCDetv2-N |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDetv2-T |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDetv2-S |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDetv2-M |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDetv2-L |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDetv2-X |  640  |       |                         |                    |                        |                   |                   |                    |  |
-
-|   Model    | Scale | Batch | AP<sup>test<br>0.5:0.95 | AP<sup>test<br>0.5 | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|------------|-------|-------|-------------------------|--------------------|------------------------|-------------------|-------------------|--------------------|--------|
-| RTCDetv2-P |  320  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDetv2-P |  416  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDetv2-P |  512  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDetv2-P |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-
-- For training, we train the RTCDetv2 series for 300 epochs on COCO.
-- For data augmentation, we use large-scale jitter (LSJ), Mosaic, and Mixup, following the setting of [YOLOX](https://github.com/ultralytics/yolov5), but we remove the rotation transformation used in YOLOX's strong augmentation.
-- For the optimizer, we use AdamW with a weight decay of 0.05 and a base learning rate of 0.001 / 64 per image.
-- For the learning rate scheduler, we use a linear decay scheduler.
-- Due to limited computing resources, I cannot train `RTCDetv2-X` with the setting of `batch size=128`.

+ 1 - 1
models/detectors/yolox/README.md

@@ -2,7 +2,7 @@
 
 |   Model |   Backbone   | Batch | Scale | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
 |---------|--------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
-| YOLOX-S | CSPDarkNet-S | 8xb8  |  640  |         39.0           |       58.8        |   26.8            |   8.9              | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolox_s_coco.pth) |
+| YOLOX-S | CSPDarkNet-S | 8xb8  |  640  |         40.1           |       60.3        |   26.8            |   8.9              | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolox_s_coco.pth) |
 | YOLOX-M | CSPDarkNet-M | 8xb8  |  640  |         46.2           |       66.0        |   74.3            |   25.4             | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolox_m_coco.pth) |
 | YOLOX-L | CSPDarkNet-L | 8xb8  |  640  |         48.7           |       68.0        |   155.4           |   54.2             | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolox_l_coco.pth) |
 | YOLOX-X | CSPDarkNet-X | 8xb8  |  640  |                        |                   |                   |                    |  |