yjh0410 committed 2 years ago
Commit 8f4509bd61

+ 6 - 1
README.md

@@ -138,7 +138,12 @@ I have provided a bash file `train_ddp.sh` that enables DDP training. I hope som
 
 | Model         |   Backbone         | Scale | Epoch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
 |---------------|--------------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
-| YOLOX-L       | CSPDarkNet-L       |  640  |  300  |        46.6            |       66.1        |   155.4           |   54.2             | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolox_coco.pth) |
+| YOLOX-N       | CSPDarkNet-N       |  640  |  300  |                        |                   |   7.5             |   2.3              |  |
+| YOLOX-S       | CSPDarkNet-S       |  640  |  300  |                        |                   |   27.1            |   9.0              |  |
+| YOLOX-M       | CSPDarkNet-M       |  640  |  300  |                        |                   |   74.3            |   25.4             |  |
+| YOLOX-L       | CSPDarkNet-L       |  640  |  300  |                        |                   |   155.4           |   54.2             |  |
+
+*For **YOLOX-L**, a larger batch size would likely improve performance; due to limited computing resources, I could only train with a batch size of 16.*
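
Training a variant should follow the same `train.py` interface used elsewhere in this README; a minimal example (flags copied from the YOLOv1 command shown in the README_CN hunk below, with `path/to/COCO` left as a placeholder):

```Shell
# Hedged example: train the small YOLOX variant with the flags this README
# already uses for YOLOv1; adjust -bs to fit your GPU memory.
python train.py --cuda -d coco --root path/to/COCO -v yolox_s -bs 16 --max_epoch 300
```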
 
 * YOLOv7:
 

+ 5 - 1
README_CN.md

@@ -138,7 +138,11 @@ python train.py --cuda -d coco --root path/to/COCO -v yolov1 -bs 16 --max_epoch
 
 | Model         |   Backbone         | Scale | Epoch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
 |---------------|--------------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
-| YOLOX-L       | CSPDarkNet-L       |  640  |  300  |        46.6            |       66.1        |   155.4           |   54.2             | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolox_coco.pth) |
+| YOLOX-N       | CSPDarkNet-N       |  640  |  300  |                        |                   |   7.5             |   2.3              |  |
+| YOLOX-S       | CSPDarkNet-S       |  640  |  300  |                        |                   |   27.1            |   9.0              |  |
+| YOLOX-M       | CSPDarkNet-M       |  640  |  300  |                        |                   |   74.3            |   25.4             |  |
+| YOLOX-L       | CSPDarkNet-L       |  640  |  300  |                        |                   |   155.4           |   54.2             |  |
+<!-- | YOLOX-L       | CSPDarkNet-L       |  640  |  300  |        46.6            |       66.1        |   155.4           |   54.2             | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolox_coco.pth) | -->
 
 * YOLOv7:
 

+ 2 - 6
config/__init__.py

@@ -5,7 +5,6 @@ from .yolov3_config import yolov3_cfg
 from .yolov4_config import yolov4_cfg
 from .yolov5_config import yolov5_cfg
 from .yolov7_config import yolov7_cfg
-from .yolov5_plus_config import yolov5_plus_cfg
 from .yolox_config import yolox_cfg
 
 
@@ -27,15 +26,12 @@ def build_model_config(args):
     # YOLOv5
     elif args.model in ['yolov5_n', 'yolov5_s', 'yolov5_m', 'yolov5_l', 'yolov5_x']:
         cfg = yolov5_cfg[args.model]
-    # YOLOv5-Plus
-    elif args.model in ['yolov5_plus_n', 'yolov5_plus_s', 'yolov5_plus_m', 'yolov5_plus_l', 'yolov5_plus_x']:
-        cfg = yolov5_plus_cfg[args.model]
     # YOLOv7
     elif args.model in ['yolov7_t', 'yolov7_l', 'yolov7_x']:
         cfg = yolov7_cfg[args.model]
     # YOLOX
-    elif args.model == 'yolox':
-        cfg = yolox_cfg
+    elif args.model in ['yolox_n', 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']:
+        cfg = yolox_cfg[args.model]
 
     return cfg
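
A quick sketch of the new keyed lookup (the `SimpleNamespace` stands in for the parsed CLI args; assumes the repo root is on `PYTHONPATH`; values read from the `yolox_s` entry in `config/yolox_config.py` below):

```python
# Hedged usage sketch: build_model_config now indexes yolox_cfg by the full
# size-suffixed name instead of returning a single monolithic dict.
from types import SimpleNamespace

from config import build_model_config

args = SimpleNamespace(model='yolox_s')
cfg = build_model_config(args)
print(cfg['width'], cfg['depth'])  # 0.5 0.34 for the S variant
```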
 

+ 0 - 364
config/yolov5_plus_config.py

@@ -1,364 +0,0 @@
-# yolov8 config
-
-yolov5_plus_cfg = {
-    'yolov5_plus_n':{
-        # input
-        'trans_type': 'yolov5_tiny',
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
-        # ----------------- Model config -----------------
-        ## Backbone
-        'backbone': 'elan_cspnet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'ratio': 2.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        ## Neck: SPP
-        'neck': 'sppf',
-        'expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: FPN
-        'fpn': 'yolov5_plus_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'ELAN_CSPBlock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
-                        [30, 61],   [62, 45],   [59, 119],    # P4
-                        [116, 90],  [156, 198], [373, 326]],  # P5
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ----------------- Label Assignment config -----------------
-        'matcher': {
-            ## For fixed assigner
-            'anchor_thresh': 4.0,
-            ## For dynamic assigner
-            'topk': 10,
-            'alpha': 0.5,
-            'beta': 6.0},
-        # ----------------- Loss config -----------------
-        'cls_loss': 'bce',
-        'loss_cls_weight': 0.5,
-        'loss_iou_weight': 7.5,
-        # ----------------- Train config -----------------
-        ## stop strong augment
-        'no_aug_epoch': 20,
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, adamw
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.004
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
-        ## WarmUpLR schedule
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-    'yolov5_plus_s':{
-        # input
-        'trans_type': 'yolov5_small',
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
-        # ----------------- Model config 
-        # Backbone
-        'backbone': 'elan_cspnet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.5,
-        'depth': 0.34,
-        'ratio': 2.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        # Neck: SPP
-        'neck': 'sppf',
-        'expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        # Neck: FPN
-        'fpn': 'yolov5_plus_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'ELAN_CSPBlock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
-                        [30, 61],   [62, 45],   [59, 119],    # P4
-                        [116, 90],  [156, 198], [373, 326]],  # P5
-        # Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ----------------- Label Assignment config -----------------
-        'matcher': {
-            ## For fixed assigner
-            'anchor_thresh': 4.0,
-            ## For dynamic assigner
-            'topk': 10,
-            'alpha': 0.5,
-            'beta': 6.0},
-        # ----------------- Loss config -----------------
-        'cls_loss': 'bce',
-        'loss_cls_weight': 0.5,
-        'loss_iou_weight': 7.5,
-        # ----------------- Train config -----------------
-        # stop strong augment
-        'no_aug_epoch': 20,
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, adamw
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.004
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
-        ## WarmUpLR schedule
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-    'yolov5_plus_m':{
-        # input
-        'trans_type': 'yolov5_medium',
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
-        # ----------------- Model config 
-        # Backbone
-        'backbone': 'elan_cspnet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.75,
-        'depth': 0.67,
-        'ratio': 1.5,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        # Neck: SPP
-        'neck': 'sppf',
-        'expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        # Neck: FPN
-        'fpn': 'yolov5_plus_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'ELAN_CSPBlock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
-                        [30, 61],   [62, 45],   [59, 119],    # P4
-                        [116, 90],  [156, 198], [373, 326]],  # P5
-        # Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ----------------- Label Assignment config -----------------
-        'matcher': {
-            ## For fixed assigner
-            'anchor_thresh': 4.0,
-            ## For dynamic assigner
-            'topk': 10,
-            'alpha': 0.5,
-            'beta': 6.0},
-        # ----------------- Loss config -----------------
-        'cls_loss': 'bce',
-        'loss_cls_weight': 0.5,
-        'loss_iou_weight': 7.5,
-        # ----------------- Train config -----------------
-        # stop strong augment
-        'no_aug_epoch': 20,
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, adamw
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.004
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
-        ## WarmUpLR schedule
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-    'yolov5_plus_l':{
-        # input
-        'trans_type': 'yolov5_large',
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
-        # ----------------- Model config 
-        # Backbone
-        'backbone': 'elan_cspnet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'ratio': 1.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        # Neck: SPP
-        'neck': 'sppf',
-        'expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        # Neck: FPN
-        'fpn': 'yolov5_plus_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'ELAN_CSPBlock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
-                        [30, 61],   [62, 45],   [59, 119],    # P4
-                        [116, 90],  [156, 198], [373, 326]],  # P5
-        # Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ----------------- Label Assignment config -----------------
-        'matcher': {
-            ## For fixed assigner
-            'anchor_thresh': 4.0,
-            ## For dynamic assigner
-            'topk': 10,
-            'alpha': 0.5,
-            'beta': 6.0},
-        # ----------------- Loss config -----------------
-        'cls_loss': 'bce',
-        'loss_cls_weight': 0.5,
-        'loss_iou_weight': 7.5,
-        # ----------------- Train config -----------------
-        # stop strong augment
-        'no_aug_epoch': 20,
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, adamw
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.004
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
-        ## WarmUpLR schedule
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-    'yolov5_plus_x':{
-        # input
-        'trans_type': 'yolov5_huge',
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
-        # ----------------- Model config 
-        # Backbone
-        'backbone': 'elan_cspnet',
-        'pretrained': False,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.25,
-        'depth': 1.0,
-        'ratio': 1.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        # Neck: SPP
-        'neck': 'sppf',
-        'expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        # Neck: FPN
-        'fpn': 'yolov5_plus_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'ELAN_CSPBlock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
-                        [30, 61],   [62, 45],   [59, 119],    # P4
-                        [116, 90],  [156, 198], [373, 326]],  # P5
-        # Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ----------------- Label Assignment config -----------------
-        'matcher': {
-            ## For fixed assigner
-            'anchor_thresh': 4.0,
-            ## For dynamic assigner
-            'topk': 10,
-            'alpha': 0.5,
-            'beta': 6.0},
-        # ----------------- Loss config -----------------
-        'cls_loss': 'bce',
-        'loss_cls_weight': 0.5,
-        'loss_iou_weight': 7.5,
-        # ----------------- Train config -----------------
-        # stop strong augment
-        'no_aug_epoch': 20,
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, adamw
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.004
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
-        ## WarmUpLR schedule
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-}

+ 291 - 47
config/yolox_config.py

@@ -1,51 +1,295 @@
 # YOLOX Config
 
+
 yolox_cfg = {
-    # input
-    'trans_type': 'yolov5_strong',
-    'multi_scale': [0.5, 1.0],
-    # model
-    'backbone': 'cspdarknet',
-    'pretrained': True,
-    'bk_act': 'silu',
-    'bk_norm': 'BN',
-    'bk_dpw': False,
-    'stride': [8, 16, 32],  # P3, P4, P5
-    'width': 1.0,
-    'depth': 1.0,
-     # fpn
-    'fpn': 'yolo_pafpn',
-    'fpn_act': 'silu',
-    'fpn_norm': 'BN',
-    'fpn_depthwise': False,
-    # head
-    'head': 'decoupled_head',
-    'head_act': 'silu',
-    'head_norm': 'BN',
-    'num_cls_head': 2,
-    'num_reg_head': 2,
-    'head_depthwise': False,
-    # matcher
-    'matcher': {'center_sampling_radius': 2.5,
-                'topk_candicate': 10},
-    # loss weight
-    'loss_obj_weight': 1.0,
-    'loss_cls_weight': 1.0,
-    'loss_box_weight': 5.0,
-    # training configuration
-    'no_aug_epoch': 20,
-    # optimizer
-    'optimizer': 'sgd',        # optional: sgd, adam, adamw
-    'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-    'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-    'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-    # model EMA
-    'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-    'ema_tau': 2000,
-    # lr schedule
-    'scheduler': 'linear',
-    'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.004
-    'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
-    'warmup_momentum': 0.8,
-    'warmup_bias_lr': 0.1,
+    'yolox_n':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 0.25,
+        'depth': 0.34,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        # ---------------- Data config ----------------
+        ## input
+        'multi_scale': [0.5, 1.5],   # 320 -> 960
+        'trans_type': 'yolov5_tiny',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'matcher': {'center_sampling_radius': 2.5,
+                    'topk_candicate': 10},
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## disable strong augmentation
+        'no_aug_epoch': 20,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, adamw
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+    'yolox_s':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 0.50,
+        'depth': 0.34,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        # ---------------- Data config ----------------
+        ## input
+        'multi_scale': [0.5, 1.5],   # 320 -> 960
+        'trans_type': 'yolov5_small',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'matcher': {'center_sampling_radius': 2.5,
+                    'topk_candicate': 10},
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## disable strong augmentation
+        'no_aug_epoch': 20,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, adamw
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+    'yolox_m':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 0.75,
+        'depth': 0.67,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        # ---------------- Data config ----------------
+        ## input
+        'multi_scale': [0.5, 1.5],   # 320 -> 960
+        'trans_type': 'yolov5_medium',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'matcher': {'center_sampling_radius': 2.5,
+                    'topk_candicate': 10},
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## disable strong augmentation
+        'no_aug_epoch': 20,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, adamw
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+    'yolox_l':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': False,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 1.0,
+        'depth': 1.0,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        # ---------------- Data config ----------------
+        ## input
+        'multi_scale': [0.5, 1.25],   # 320 -> 800
+        'trans_type': 'yolov5_large',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'matcher': {'center_sampling_radius': 2.5,
+                    'topk_candicate': 10},
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## disable strong augmentation
+        'no_aug_epoch': 20,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, adamw
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+    'yolox_x':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 1.25,
+        'depth': 1.34,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        # ---------------- Data config ----------------
+        ## input
+        'multi_scale': [0.5, 1.25],   # 320 -> 800
+        'trans_type': 'yolov5_huge',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'matcher': {'center_sampling_radius': 2.5,
+                    'topk_candicate': 10},
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## disable strong augmentation
+        'no_aug_epoch': 20,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, adamw
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
 }
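
The `width`/`depth` pairs above follow the familiar CSPDarkNet scaling ladder (N: 0.25/0.34 up to X: 1.25/1.34). As a sketch of how such multipliers are typically applied; the round-to-a-multiple-of-8 rule is the common YOLOv5-style convention, assumed here rather than read from this repo:

```python
def scale_channels(base_channels: int, width: float) -> int:
    # Width scaling: multiply, then round to a multiple of 8 so channel
    # counts stay hardware-friendly.
    return max(round(base_channels * width / 8) * 8, 8)

def scale_depth(base_blocks: int, depth: float) -> int:
    # Depth scaling: multiply the per-stage block count, keep at least 1.
    return max(round(base_blocks * depth), 1)

# yolox_n (width=0.25, depth=0.34): a 256-channel stage becomes 64 channels,
# and a 9-block stage shrinks to 3 blocks.
print(scale_channels(256, 0.25), scale_depth(9, 0.34))  # 64 3
```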

+ 1 - 6
models/detectors/__init__.py

@@ -8,7 +8,6 @@ from .yolov3.build import build_yolov3
 from .yolov4.build import build_yolov4
 from .yolov5.build import build_yolov5
 from .yolov7.build import build_yolov7
-from .yolov5_plus.build import build_yolov5_plus
 from .yolox.build import build_yolox
 
 
@@ -42,12 +41,8 @@ def build_model(args,
     elif args.model in ['yolov7_t', 'yolov7_l', 'yolov7_x']:
         model, criterion = build_yolov7(
             args, model_cfg, device, num_classes, trainable)
-    # YOLOv5-Plus
-    elif args.model in ['yolov5_plus_n', 'yolov5_plus_s', 'yolov5_plus_m', 'yolov5_plus_l', 'yolov5_plus_x']:
-        model, criterion = build_yolov5_plus(
-            args, model_cfg, device, num_classes, trainable)
     # YOLOX   
-    elif args.model == 'yolox':
+    elif args.model in ['yolox_n', 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']:
         model, criterion = build_yolox(
             args, model_cfg, device, num_classes, trainable)
 

+ 0 - 58
models/detectors/yolov5_plus/build.py

@@ -1,58 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-import torch
-import torch.nn as nn
-
-from .loss import build_criterion
-from .yolov5_plus import YOLOv5_Plus
-
-
-# build object detector
-def build_yolov5_plus(args, cfg, device, num_classes=80, trainable=False):
-    print('==============================')
-    print('Build {} ...'.format(args.model.upper()))
-    
-    print('==============================')
-    print('Model Configuration: \n', cfg)
-    
-    # -------------- Build YOLO --------------
-    model = YOLOv5_Plus(
-        cfg=cfg,
-        device=device, 
-        num_classes=num_classes,
-        trainable=trainable,
-        conf_thresh=args.conf_thresh,
-        nms_thresh=args.nms_thresh,
-        topk=args.topk
-        )
-
-    # -------------- Initialize YOLO --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-    # Init bias
-    init_prob = 0.01
-    bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
-    # cls pred
-    for cls_pred in model.cls_preds:
-        b = cls_pred.bias.view(1, -1)
-        b.data.fill_(bias_value.item())
-        cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-    # reg pred
-    for reg_pred in model.reg_preds:
-        b = reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        w = reg_pred.weight
-        w.data.fill_(0.)
-        reg_pred.weight = torch.nn.Parameter(w, requires_grad=True)
-
-
-    # -------------- Build criterion --------------
-    criterion = None
-    if trainable:
-        # build criterion for training
-        criterion = build_criterion(cfg, device, num_classes, warmup_epoch=args.wp_epoch)
-    return model, criterion
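
For reference, the class-prediction bias initialization in the deleted `build.py` is the standard prior-probability trick (as popularized by RetinaNet): with `b = -log((1 - p) / p)`, every class logit starts out predicting the prior `p`, which keeps early training stable. A quick arithmetic check:

```python
import math

# b = -log((1 - p) / p) makes sigmoid(b) equal the prior p.
p = 0.01
b = -math.log((1.0 - p) / p)
print(b)                            # ~ -4.595
print(1.0 / (1.0 + math.exp(-b)))   # ~ 0.01, i.e. p
```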

+ 0 - 280
models/detectors/yolov5_plus/loss.py

@@ -1,280 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from .matcher import TaskAlignedAssigner, Yolov5Matcher
-from utils.box_ops import bbox_iou, get_ious
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-
-
-class Criterion(object):
-    def __init__(self, 
-                 cfg, 
-                 device, 
-                 num_classes=80,
-                 warmup_epoch=1):
-        # ------------------ Basic Parameters ------------------
-        self.cfg = cfg
-        self.device = device
-        self.num_classes = num_classes
-        self.warmup_epoch = warmup_epoch
-        self.warmup_stage = True
-        # ------------------ Loss Parameters ------------------
-        ## loss function
-        self.cls_lossf = ClassificationLoss(cfg, reduction='none')
-        self.reg_lossf = RegressionLoss(num_classes)
-        ## loss coeff
-        self.loss_cls_weight = cfg['loss_cls_weight']
-        self.loss_iou_weight = cfg['loss_iou_weight']
-        # ------------------ Label Assigner ------------------
-        matcher_config = cfg['matcher']
-        ## matcher-1
-        self.fixed_matcher = Yolov5Matcher(
-            num_classes=num_classes, 
-            num_anchors=3, 
-            anchor_size=cfg['anchor_size'],
-            anchor_theshold=matcher_config['anchor_thresh']
-            )
-        ## matcher-2
-        self.dynamic_matcher = TaskAlignedAssigner(
-            topk=matcher_config['topk'],
-            num_classes=num_classes,
-            alpha=matcher_config['alpha'],
-            beta=matcher_config['beta']
-            )
-
-
-    def fixed_assignment_loss(self, outputs, targets):
-        device = outputs['pred_cls'][0].device
-        fpn_strides = outputs['strides']
-        fmp_sizes = outputs['fmp_sizes']
-        (
-            gt_objectness, 
-            gt_classes, 
-            gt_bboxes,
-            ) = self.fixed_matcher(fmp_sizes=fmp_sizes, 
-                                   fpn_strides=fpn_strides, 
-                                   targets=targets)
-        # List[B, M, C] -> [B, M, C] -> [BM, C]
-        pred_cls = torch.cat(outputs['pred_cls'], dim=1).view(-1, self.num_classes)    # [BM, C]
-        pred_box = torch.cat(outputs['pred_box'], dim=1).view(-1, 4)                   # [BM, 4]
-       
-        gt_objectness = gt_objectness.view(-1).to(device).float()               # [BM,]
-        gt_classes = gt_classes.view(-1, self.num_classes).to(device).float()   # [BM, C]
-        gt_bboxes = gt_bboxes.view(-1, 4).to(device).float()                    # [BM, 4]
-
-        pos_masks = (gt_objectness > 0)
-        num_fgs = pos_masks.sum()
-
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # box loss
-        ious = get_ious(pred_box[pos_masks],
-                        gt_bboxes[pos_masks],
-                        box_mode="xyxy",
-                        iou_type='giou')
-        loss_box = 1.0 - ious
-        loss_box = loss_box.sum() / num_fgs
-        
-        # cls loss
-        gt_classes[pos_masks] = gt_classes[pos_masks] * ious.unsqueeze(-1).clamp(0.)
-        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_classes, reduction='none')
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # total loss
-        losses = self.loss_cls_weight * loss_cls + \
-                 self.loss_iou_weight * loss_box
-
-        loss_dict = dict(
-                loss_cls = loss_cls,
-                loss_box = loss_box,
-                losses = losses
-        )
-
-        return loss_dict
-
-
-    def dynamic_assignment_loss(self, outputs, targets):
-        bs = outputs['pred_cls'][0].shape[0]
-        device = outputs['pred_cls'][0].device
-        anchors = outputs['anchors']
-        anchors = torch.cat(anchors, dim=0)
-        num_anchors = anchors.shape[0]
-
-        # preds: [B, M, C]
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-        
-        # label assignment
-        gt_score_targets = []
-        gt_bbox_targets = []
-        fg_masks = []
-
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)     # [Mp,]
-            tgt_boxs = targets[batch_idx]["boxes"].to(device)        # [Mp, 4]
-
-            # check target
-            if len(tgt_labels) == 0 or tgt_boxs.max().item() == 0.:
-                # There is no valid gt
-                fg_mask = cls_preds.new_zeros(1, num_anchors).bool()               #[1, M,]
-                gt_score = cls_preds.new_zeros((1, num_anchors, self.num_classes)) #[1, M, C]
-                gt_box = cls_preds.new_zeros((1, num_anchors, 4))                  #[1, M, 4]
-            else:
-                tgt_labels = tgt_labels[None, :, None]      # [1, Mp, 1]
-                tgt_boxs = tgt_boxs[None]                   # [1, Mp, 4]
-                (
-                    _,
-                    gt_box,     #[1, M, 4]
-                    gt_score,   #[1, M, C]
-                    fg_mask,    #[1, M,]
-                    _
-                ) = self.dynamic_matcher(
-                    pd_scores = cls_preds[batch_idx:batch_idx+1].detach().sigmoid(), 
-                    pd_bboxes = box_preds[batch_idx:batch_idx+1].detach(),
-                    anc_points = anchors[..., :2],
-                    gt_labels = tgt_labels,
-                    gt_bboxes = tgt_boxs
-                    )
-            gt_score_targets.append(gt_score)
-            gt_bbox_targets.append(gt_box)
-            fg_masks.append(fg_mask)
-
-        # List[B, 1, M, C] -> Tensor[B, M, C] -> Tensor[BM, C]
-        fg_masks = torch.cat(fg_masks, 0).view(-1)                                    # [BM,]
-        gt_score_targets = torch.cat(gt_score_targets, 0).view(-1, self.num_classes)  # [BM, C]
-        gt_bbox_targets = torch.cat(gt_bbox_targets, 0).view(-1, 4)                   # [BM, 4]
-        
-        # cls loss
-        cls_preds = cls_preds.view(-1, self.num_classes)
-        loss_cls = self.cls_lossf(cls_preds, gt_score_targets)
-
-        # reg loss
-        bbox_weight = gt_score_targets[fg_masks].sum(-1, keepdim=True)                 # [BM, 1]
-        box_preds = box_preds.view(-1, 4)                                              # [BM, 4]
-        loss_iou = self.reg_lossf(
-            pred_boxs = box_preds,
-            gt_boxs = gt_bbox_targets,
-            bbox_weight = bbox_weight,
-            fg_masks = fg_masks
-            )
-
-        num_fgs = gt_score_targets.sum()
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # normalize loss
-        loss_cls = loss_cls.sum() / num_fgs
-        loss_iou = loss_iou.sum() / num_fgs
-
-        # total loss
-        losses = loss_cls * self.loss_cls_weight + \
-                 loss_iou * self.loss_iou_weight
-        loss_dict = dict(
-                loss_cls = loss_cls,
-                loss_iou = loss_iou,
-                losses = losses
-        )
-
-        return loss_dict
-
-
-    def __call__(self, outputs, targets, epoch=0):        
-        """
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_regs']: List(Tensor) [B, M, 4*(reg_max+1)]
-            outputs['pred_boxs']: List(Tensor) [B, M, 4]
-            outputs['anchors']: List(Tensor) [M, 2]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            outputs['stride_tensor']: List(Tensor) [M, 1]
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        # Fixed LA stage
-        if epoch < self.warmup_epoch:
-            return self.fixed_assignment_loss(outputs, targets)
-        # Switch to Dynamic LA stage
-        elif epoch >= self.warmup_epoch:
-            if self.warmup_stage:
-                print('Switch to Dynamic Label Assignment.')
-                self.warmup_stage = False
-            return self.dynamic_assignment_loss(outputs, targets)
-    
-
-class ClassificationLoss(nn.Module):
-    def __init__(self, cfg, reduction='none'):
-        super(ClassificationLoss, self).__init__()
-        self.cfg = cfg
-        self.reduction = reduction
-
-
-    def binary_cross_entropy(self, pred_logits, gt_score):
-        loss = F.binary_cross_entropy_with_logits(
-            pred_logits.float(), gt_score.float(), reduction='none')
-
-        if self.reduction == 'sum':
-            loss = loss.sum()
-        elif self.reduction == 'mean':
-            loss = loss.mean()
-
-        return loss
-
-
-    def forward(self, pred_logits, gt_score):
-        if self.cfg['cls_loss'] == 'bce':
-            return self.binary_cross_entropy(pred_logits, gt_score)
-
-
-class RegressionLoss(nn.Module):
-    def __init__(self, num_classes):
-        super(RegressionLoss, self).__init__()
-        self.num_classes = num_classes
-
-
-    def forward(self, pred_boxs, gt_boxs, bbox_weight, fg_masks):
-        """
-        Input:
-            pred_boxs: (Tensor) [BM, 4]
-            anchors: (Tensor) [BM, 2]
-            gt_boxs: (Tensor) [BM, 4]
-            bbox_weight: (Tensor) [BM, 1]
-            fg_masks: (Tensor) [BM,]
-            strides: (Tensor) [BM, 1]
-        """
-        # select positive samples mask
-        num_pos = fg_masks.sum()
-
-        if num_pos > 0:
-            pred_boxs_pos = pred_boxs[fg_masks]
-            gt_boxs_pos = gt_boxs[fg_masks]
-
-            # iou loss
-            ious = bbox_iou(pred_boxs_pos,
-                            gt_boxs_pos,
-                            xywh=False,
-                            CIoU=True)
-            loss_iou = (1.0 - ious) * bbox_weight
-               
-        else:
-            loss_iou = pred_boxs.sum() * 0.
-
-        return loss_iou
-
-
-def build_criterion(cfg, device, num_classes, warmup_epoch=1):
-    criterion = Criterion(
-        cfg=cfg,
-        device=device,
-        num_classes=num_classes,
-        warmup_epoch=warmup_epoch,
-        )
-
-    return criterion
-
-
-if __name__ == "__main__":
-    pass
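
Structurally, the deleted `Criterion` is a two-stage scheme: `Yolov5Matcher` (fixed, anchor-based) while `epoch < warmup_epoch`, then `TaskAlignedAssigner` (dynamic) afterwards. A minimal sketch of just that control flow:

```python
# Sketch of the deleted Criterion's assignment schedule, nothing more.
class AssignmentSchedule:
    def __init__(self, warmup_epoch: int = 1):
        self.warmup_epoch = warmup_epoch
        self.warmup_stage = True

    def pick(self, epoch: int) -> str:
        if epoch < self.warmup_epoch:
            return 'fixed'      # Yolov5Matcher: IoU / aspect-ratio rules
        if self.warmup_stage:
            print('Switch to Dynamic Label Assignment.')
            self.warmup_stage = False
        return 'dynamic'        # TaskAlignedAssigner

schedule = AssignmentSchedule(warmup_epoch=1)
print([schedule.pick(e) for e in range(3)])  # ['fixed', 'dynamic', 'dynamic']
```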

+ 0 - 419
models/detectors/yolov5_plus/matcher.py

@@ -1,419 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from utils.box_ops import bbox_iou
-
-
-# -------------------------- YOLOv5 Assigner --------------------------
-class Yolov5Matcher(object):
-    def __init__(self, num_classes, num_anchors, anchor_size, anchor_theshold):
-        self.num_classes = num_classes
-        self.num_anchors = num_anchors
-        self.anchor_theshold = anchor_theshold
-        # [KA, 2]
-        self.anchor_sizes = np.array([[anchor[0], anchor[1]]
-                                      for anchor in anchor_size])
-        # [KA, 4]
-        self.anchor_boxes = np.array([[0., 0., anchor[0], anchor[1]]
-                                      for anchor in anchor_size])
-
-    def compute_iou(self, anchor_boxes, gt_box):
-        """
-            anchor_boxes : ndarray -> [KA, 4] (cx, cy, bw, bh).
-            gt_box : ndarray -> [1, 4] (cx, cy, bw, bh).
-        """
-        # anchors: [KA, 4]
-        anchors = np.zeros_like(anchor_boxes)
-        anchors[..., :2] = anchor_boxes[..., :2] - anchor_boxes[..., 2:] * 0.5  # x1y1
-        anchors[..., 2:] = anchor_boxes[..., :2] + anchor_boxes[..., 2:] * 0.5  # x2y2
-        anchors_area = anchor_boxes[..., 2] * anchor_boxes[..., 3]
-        
-        # gt_box: [1, 4] -> [KA, 4]
-        gt_box = np.array(gt_box).reshape(-1, 4)
-        gt_box = np.repeat(gt_box, anchors.shape[0], axis=0)
-        gt_box_ = np.zeros_like(gt_box)
-        gt_box_[..., :2] = gt_box[..., :2] - gt_box[..., 2:] * 0.5  # x1y1
-        gt_box_[..., 2:] = gt_box[..., :2] + gt_box[..., 2:] * 0.5  # x2y2
-        gt_box_area = np.prod(gt_box[..., 2:] - gt_box[..., :2], axis=1)
-
-        # intersection
-        inter_w = np.minimum(anchors[:, 2], gt_box_[:, 2]) - \
-                  np.maximum(anchors[:, 0], gt_box_[:, 0])
-        inter_h = np.minimum(anchors[:, 3], gt_box_[:, 3]) - \
-                  np.maximum(anchors[:, 1], gt_box_[:, 1])
-        inter_area = inter_w * inter_h
-        
-        # union
-        union_area = anchors_area + gt_box_area - inter_area
-
-        # iou
-        iou = inter_area / union_area
-        iou = np.clip(iou, a_min=1e-10, a_max=1.0)
-        
-        return iou
-
-
-    def iou_assignment(self, ctr_points, gt_box, fpn_strides):
-        # compute IoU
-        iou = self.compute_iou(self.anchor_boxes, gt_box)
-        iou_mask = (iou > 0.5)
-
-        label_assignment_results = []
-        if iou_mask.sum() == 0:
-            # We assign the anchor box with highest IoU score.
-            iou_ind = np.argmax(iou)
-
-            level = iou_ind // self.num_anchors              # pyramid level
-            anchor_idx = iou_ind - level * self.num_anchors  # anchor index
-
-            # get the corresponding stride
-            stride = fpn_strides[level]
-
-            # compute the grid cell
-            xc, yc = ctr_points
-            xc_s = xc / stride
-            yc_s = yc / stride
-            grid_x = int(xc_s)
-            grid_y = int(yc_s)
-
-            label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
-        else:            
-            for iou_ind, iou_m in enumerate(iou_mask):
-                if iou_m:
-                    level = iou_ind // self.num_anchors              # pyramid level
-                    anchor_idx = iou_ind - level * self.num_anchors  # anchor index
-
-                    # get the corresponding stride
-                    stride = fpn_strides[level]
-
-                    # compute the gride cell
-                    xc, yc = ctr_points
-                    xc_s = xc / stride
-                    yc_s = yc / stride
-                    grid_x = int(xc_s)
-                    grid_y = int(yc_s)
-
-                    label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
-
-        return label_assignment_results
-
-
-    def aspect_ratio_assignment(self, ctr_points, keeps, fpn_strides):
-        label_assignment_results = []
-        for keep_idx, keep in enumerate(keeps):
-            if keep:
-                level = keep_idx // self.num_anchors              # pyramid level
-                anchor_idx = keep_idx - level * self.num_anchors  # anchor index
-
-                # get the corresponding stride
-                stride = fpn_strides[level]
-
-                # compute the gride cell
-                xc, yc = ctr_points
-                xc_s = xc / stride
-                yc_s = yc / stride
-                grid_x = int(xc_s)
-                grid_y = int(yc_s)
-
-                label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
-        
-        return label_assignment_results
-    
-
-    @torch.no_grad()
-    def __call__(self, fmp_sizes, fpn_strides, targets):
-        """
-            fmp_size: (List) [fmp_h, fmp_w]
-            fpn_strides: (List) -> [8, 16, 32, ...] stride of network output.
-            targets: (Dict) dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}
-        """
-        assert len(fmp_sizes) == len(fpn_strides)
-        # prepare
-        bs = len(targets)
-        gt_objectness = [
-            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, 1]) 
-            for (fmp_h, fmp_w) in fmp_sizes
-            ]
-        gt_classes = [
-            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, self.num_classes]) 
-            for (fmp_h, fmp_w) in fmp_sizes
-            ]
-        gt_bboxes = [
-            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, 4]) 
-            for (fmp_h, fmp_w) in fmp_sizes
-            ]
-
-        for batch_index in range(bs):
-            targets_per_image = targets[batch_index]
-            # [N,]
-            tgt_cls = targets_per_image["labels"].numpy()
-            # [N, 4]
-            tgt_box = targets_per_image['boxes'].numpy()
-
-            for gt_box, gt_label in zip(tgt_box, tgt_cls):
-                # get a bbox coords
-                x1, y1, x2, y2 = gt_box.tolist()
-                # xyxy -> cxcywh
-                xc, yc = (x2 + x1) * 0.5, (y2 + y1) * 0.5
-                bw, bh = x2 - x1, y2 - y1
-                gt_box = np.array([[0., 0., bw, bh]])
-
-                # check target
-                if bw < 1. or bh < 1.:
-                    # invalid target
-                    continue
-
-                # compute aspect ratio
-                ratios = gt_box[..., 2:] / self.anchor_sizes
-                keeps = np.maximum(ratios, 1 / ratios).max(-1) < self.anchor_theshold
-
-                if keeps.sum() == 0:
-                    label_assignment_results = self.iou_assignment([xc, yc], gt_box, fpn_strides)
-                else:
-                    label_assignment_results = self.aspect_ratio_assignment([xc, yc], keeps, fpn_strides)
-
-                # label assignment
-                for result in label_assignment_results:
-                    # assignment
-                    grid_x, grid_y, xc_s, yc_s, level, anchor_idx = result
-                    stride = fpn_strides[level]
-                    fmp_h, fmp_w = fmp_sizes[level]
-                    # coord on the feature
-                    x1s, y1s = x1 / stride, y1 / stride
-                    x2s, y2s = x2 / stride, y2 / stride
-                    # offset
-                    off_x = xc_s - grid_x
-                    off_y = yc_s - grid_y
- 
-                    if off_x <= 0.5 and off_y <= 0.5:  # top left
-                        grids = [(grid_x-1, grid_y), (grid_x, grid_y-1), (grid_x, grid_y)]
-                    elif off_x > 0.5 and off_y <= 0.5: # top right
-                        grids = [(grid_x+1, grid_y), (grid_x, grid_y-1), (grid_x, grid_y)]
-                    elif off_x <= 0.5 and off_y > 0.5: # bottom left
-                        grids = [(grid_x-1, grid_y), (grid_x, grid_y+1), (grid_x, grid_y)]
-                    elif off_x > 0.5 and off_y > 0.5:  # bottom right
-                        grids = [(grid_x+1, grid_y), (grid_x, grid_y+1), (grid_x, grid_y)]
-
-                    for (i, j) in grids:
-                        is_in_box = (j >= y1s and j < y2s) and (i >= x1s and i < x2s)
-                        is_valid = (j >= 0 and j < fmp_h) and (i >= 0 and i < fmp_w)
-
-                        if is_in_box and is_valid:
-                            # obj
-                            gt_objectness[level][batch_index, j, i, anchor_idx] = 1.0
-                            # cls
-                            cls_ont_hot = torch.zeros(self.num_classes)
-                            cls_ont_hot[int(gt_label)] = 1.0
-                            gt_classes[level][batch_index, j, i, anchor_idx] = cls_ont_hot
-                            # box
-                            gt_bboxes[level][batch_index, j, i, anchor_idx] = torch.as_tensor([x1, y1, x2, y2])
-
-        # [B, M, C]
-        gt_objectness = torch.cat([gt.view(bs, -1, 1) for gt in gt_objectness], dim=1).float()
-        gt_classes = torch.cat([gt.view(bs, -1, self.num_classes) for gt in gt_classes], dim=1).float()
-        gt_bboxes = torch.cat([gt.view(bs, -1, 4) for gt in gt_bboxes], dim=1).float()
-
-        return gt_objectness, gt_classes, gt_bboxes
-
-
-# -------------------------- Task Aligned Assigner --------------------------
-class TaskAlignedAssigner(nn.Module):
-    def __init__(self,
-                 topk=10,
-                 num_classes=80,
-                 alpha=0.5,
-                 beta=6.0, 
-                 eps=1e-9):
-        super(TaskAlignedAssigner, self).__init__()
-        self.topk = topk
-        self.num_classes = num_classes
-        self.bg_idx = num_classes
-        self.alpha = alpha
-        self.beta = beta
-        self.eps = eps
-
-    @torch.no_grad()
-    def forward(self,
-                pd_scores,
-                pd_bboxes,
-                anc_points,
-                gt_labels,
-                gt_bboxes):
-        """This code referenced to
-           https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py
-        Args:
-            pd_scores (Tensor): shape(bs, num_total_anchors, num_classes)
-            pd_bboxes (Tensor): shape(bs, num_total_anchors, 4)
-            anc_points (Tensor): shape(num_total_anchors, 2)
-            gt_labels (Tensor): shape(bs, n_max_boxes, 1)
-            gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
-        Returns:
-            target_labels (Tensor): shape(bs, num_total_anchors)
-            target_bboxes (Tensor): shape(bs, num_total_anchors, 4)
-            target_scores (Tensor): shape(bs, num_total_anchors, num_classes)
-            fg_mask (Tensor): shape(bs, num_total_anchors)
-        """
-        self.bs = pd_scores.size(0)
-        self.n_max_boxes = gt_bboxes.size(1)
-
-        mask_pos, align_metric, overlaps = self.get_pos_mask(
-            pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points)
-
-        target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(
-            mask_pos, overlaps, self.n_max_boxes)
-
-        # assigned target
-        target_labels, target_bboxes, target_scores = self.get_targets(
-            gt_labels, gt_bboxes, target_gt_idx, fg_mask)
-
-        # normalize
-        align_metric *= mask_pos
-        pos_align_metrics = align_metric.amax(axis=-1, keepdim=True)  # b, max_num_obj
-        pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True)  # b, max_num_obj
-        norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1)
-        target_scores = target_scores * norm_align_metric
-
-        return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx
-
-
-    def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points):
-        # get anchor_align metric, (b, max_num_obj, h*w)
-        align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes)
-        # get in_gts mask, (b, max_num_obj, h*w)
-        mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
-        # get topk_metric mask, (b, max_num_obj, h*w)
-        mask_topk = self.select_topk_candidates(align_metric * mask_in_gts)
-        # merge all mask to a final mask, (b, max_num_obj, h*w)
-        mask_pos = mask_topk * mask_in_gts
-
-        return mask_pos, align_metric, overlaps
-
-
-    def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes):
-        ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long)  # 2, b, max_num_obj
-        ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes)  # b, max_num_obj
-        ind[1] = gt_labels.long().squeeze(-1)  # b, max_num_obj
-        # get the scores of each grid for each gt cls
-        bbox_scores = pd_scores[ind[0], :, ind[1]]  # b, max_num_obj, h*w
-
-        overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False,
-                            CIoU=True).squeeze(3).clamp(0)
-        align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)
-
-        return align_metric, overlaps
-
-
-    def select_topk_candidates(self, metrics, largest=True):
-        """
-        Args:
-            metrics: (b, max_num_obj, h*w).
-            topk_mask: (b, max_num_obj, topk) or None
-        """
-
-        num_anchors = metrics.shape[-1]  # h*w
-        # (b, max_num_obj, topk)
-        topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest)
-        topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).tile([1, 1, self.topk])
-        # (b, max_num_obj, topk)
-        topk_idxs[~topk_mask] = 0
-        # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w)
-        is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2)
-        # filter invalid bboxes
-        is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk)
-        return is_in_topk.to(metrics.dtype)
-
-
-    def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
-        """
-        Args:
-            gt_labels: (b, max_num_obj, 1)
-            gt_bboxes: (b, max_num_obj, 4)
-            target_gt_idx: (b, h*w)
-            fg_mask: (b, h*w)
-        """
-
-        # assigned target labels, (b, 1)
-        batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
-        target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes  # (b, h*w)
-        target_labels = gt_labels.long().flatten()[target_gt_idx]  # (b, h*w)
-
-        # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w)
-        target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx]
-
-        # assigned target scores
-        target_labels.clamp_(0)  # in-place; a bare clamp(0) would discard its result
-        target_scores = F.one_hot(target_labels, self.num_classes)  # (b, h*w, num_classes)
-        fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes)  # (b, h*w, 80)
-        target_scores = torch.where(fg_scores_mask > 0, target_scores, 0)
-
-        return target_labels, target_bboxes, target_scores
-    
-
-# -------------------------- Basic Functions --------------------------
-def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
-    """select the positive anchors's center in gt
-    Args:
-        xy_centers (Tensor): shape(bs*n_max_boxes, num_total_anchors, 4)
-        gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    n_anchors = xy_centers.size(0)
-    bs, n_max_boxes, _ = gt_bboxes.size()
-    _gt_bboxes = gt_bboxes.reshape([-1, 4])
-    xy_centers = xy_centers.unsqueeze(0).repeat(bs * n_max_boxes, 1, 1)
-    gt_bboxes_lt = _gt_bboxes[:, 0:2].unsqueeze(1).repeat(1, n_anchors, 1)
-    gt_bboxes_rb = _gt_bboxes[:, 2:4].unsqueeze(1).repeat(1, n_anchors, 1)
-    b_lt = xy_centers - gt_bboxes_lt
-    b_rb = gt_bboxes_rb - xy_centers
-    bbox_deltas = torch.cat([b_lt, b_rb], dim=-1)
-    bbox_deltas = bbox_deltas.reshape([bs, n_max_boxes, n_anchors, -1])
-    return (bbox_deltas.min(axis=-1)[0] > eps).to(gt_bboxes.dtype)
-
-
-def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
-    """if an anchor box is assigned to multiple gts,
-        the one with the highest iou will be selected.
-    Args:
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-        overlaps (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    Return:
-        target_gt_idx (Tensor): shape(bs, num_total_anchors)
-        fg_mask (Tensor): shape(bs, num_total_anchors)
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    fg_mask = mask_pos.sum(axis=-2)
-    if fg_mask.max() > 1:
-        mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1])
-        max_overlaps_idx = overlaps.argmax(axis=1)
-        is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes)
-        is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype)
-        mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos)
-        fg_mask = mask_pos.sum(axis=-2)
-    target_gt_idx = mask_pos.argmax(axis=-2)
-    return target_gt_idx, fg_mask, mask_pos
-
-
-def iou_calculator(box1, box2, eps=1e-9):
-    """Calculate iou for batch
-    Args:
-        box1 (Tensor): shape(bs, n_max_boxes, 4)
-        box2 (Tensor): shape(bs, num_total_anchors, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    box1 = box1.unsqueeze(2)  # [N, M1, 4] -> [N, M1, 1, 4]
-    box2 = box2.unsqueeze(1)  # [N, M2, 4] -> [N, 1, M2, 4]
-    px1y1, px2y2 = box1[:, :, :, 0:2], box1[:, :, :, 2:4]
-    gx1y1, gx2y2 = box2[:, :, :, 0:2], box2[:, :, :, 2:4]
-    x1y1 = torch.maximum(px1y1, gx1y1)
-    x2y2 = torch.minimum(px2y2, gx2y2)
-    overlap = (x2y2 - x1y1).clip(0).prod(-1)
-    area1 = (px2y2 - px1y1).clip(0).prod(-1)
-    area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
-    union = area1 + area2 - overlap + eps
-
-    return overlap / union

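The label assigner removed above scores every anchor with a task-aligned metric, `score**alpha * iou**beta`, before keeping the top-k anchors per ground truth. A minimal sketch of that metric with made-up scores and IoUs (not repo code):

```python
# Toy illustration of align_metric = cls_score**alpha * iou**beta.
import torch

alpha, beta, topk = 0.5, 6.0, 2
cls_score = torch.tensor([0.9, 0.2, 0.7, 0.6])  # predicted prob of the gt class at 4 anchors
iou = torch.tensor([0.8, 0.9, 0.3, 0.7])        # CIoU of each predicted box vs. the gt box

align_metric = cls_score.pow(alpha) * iou.pow(beta)
keep = align_metric.topk(topk).indices          # anchors retained as positives for this gt
print(align_metric, keep)
```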
+ 0 - 251
models/detectors/yolov5_plus/yolov5_plus.py

@@ -1,251 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .yolov5_plus_backbone import build_backbone
-from .yolov5_plus_neck import build_neck
-from .yolov5_plus_pafpn import build_fpn
-from .yolov5_plus_head import build_head
-
-from utils.misc import multiclass_nms
-
-
-# Stronger YOLOv5
-class YOLOv5_Plus(nn.Module):
-    def __init__(self, 
-                 cfg,
-                 device, 
-                 num_classes = 20, 
-                 conf_thresh = 0.05,
-                 nms_thresh = 0.6,
-                 trainable = False, 
-                 topk = 1000):
-        super(YOLOv5_Plus, self).__init__()
-        # --------- Basic Parameters ----------
-        self.cfg = cfg
-        self.device = device
-        self.stride = cfg['stride']
-        self.num_classes = num_classes
-        self.trainable = trainable
-        self.conf_thresh = conf_thresh
-        self.nms_thresh = nms_thresh
-        self.topk = topk
-        
-        # ------------------- Anchor box -------------------
-        self.num_levels = 3
-        self.num_anchors = len(cfg['anchor_size']) // self.num_levels
-        self.anchor_size = torch.as_tensor(
-            cfg['anchor_size']
-            ).view(self.num_levels, self.num_anchors, 2) # [S, A, 2]
-        
-        # ------------------- Network Structure -------------------
-        ## backbone
-        self.backbone, feats_dim = build_backbone(cfg, cfg['pretrained']*trainable)
-
-        ## neck
-        self.neck = build_neck(cfg=cfg, in_dim=feats_dim[-1], out_dim=feats_dim[-1])
-        feats_dim[-1] = self.neck.out_dim
-        
-        ## fpn
-        self.fpn = build_fpn(cfg=cfg, in_dims=feats_dim, out_dim=round(256*cfg['width']))
-        self.head_dim = self.fpn.out_dim
-
-        ## non-shared heads
-        self.non_shared_heads = nn.ModuleList(
-            [build_head(cfg, head_dim, head_dim, num_classes) 
-            for head_dim in self.head_dim
-            ])
-
-        ## pred
-        self.cls_preds = nn.ModuleList(
-                            [nn.Conv2d(head.cls_out_dim, self.num_classes * self.num_anchors, kernel_size=1) 
-                                for head in self.non_shared_heads
-                              ]) 
-        self.reg_preds = nn.ModuleList(
-                            [nn.Conv2d(head.reg_out_dim, 4 * self.num_anchors, kernel_size=1) 
-                                for head in self.non_shared_heads
-                              ])                 
-
-
-    # ---------------------- Basic Functions ----------------------
-    ## generate anchor points
-    def generate_anchors(self, level, fmp_size):
-        fmp_h, fmp_w = fmp_size
-        # [KA, 2]
-        anchor_size = self.anchor_size[level]
-
-        # generate grid cells
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        anchor_xy = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        # [HW, 2] -> [HW, KA, 2] -> [M, 2]
-        anchor_xy = anchor_xy.unsqueeze(1).repeat(1, self.num_anchors, 1)
-        anchor_xy = anchor_xy.view(-1, 2).to(self.device)
-        anchor_xy += 0.5
-        anchor_xy *= self.stride[level]
-
-        # [KA, 2] -> [1, KA, 2] -> [HW, KA, 2] -> [M, 2]
-        anchor_wh = anchor_size.unsqueeze(0).repeat(fmp_h*fmp_w, 1, 1)
-        anchor_wh = anchor_wh.view(-1, 2).to(self.device)
-
-        anchors = torch.cat([anchor_xy, anchor_wh], dim=-1)
-
-        return anchors
-        
-    ## post-process
-    def post_process(self, cls_preds, box_preds):
-        """
-        Input:
-            cls_preds: List(Tensor) [[H x W, C], ...]
-            box_preds: List(Tensor) [[H x W, 4], ...]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
-            # (H x W x C,)
-            scores_i = cls_pred_i.sigmoid().flatten()
-
-            # Keep top k top scoring indices only.
-            num_topk = min(self.topk, box_pred_i.size(0))
-
-            # torch.sort is actually faster than .topk (at least on GPUs)
-            predicted_prob, topk_idxs = scores_i.sort(descending=True)
-            topk_scores = predicted_prob[:num_topk]
-            topk_idxs = topk_idxs[:num_topk]
-
-            # filter out the proposals with low confidence score
-            keep_idxs = topk_scores > self.conf_thresh
-            topk_scores = topk_scores[keep_idxs]
-            topk_idxs = topk_idxs[keep_idxs]
-
-            anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-            topk_labels = topk_idxs % self.num_classes
-            topk_bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(topk_scores)
-            all_labels.append(topk_labels)
-            all_bboxes.append(topk_bboxes)
-
-        scores = torch.cat(all_scores)
-        labels = torch.cat(all_labels)
-        bboxes = torch.cat(all_bboxes)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes, False)
-
-        return bboxes, scores, labels
-    
-
-    # ---------------------- Main Process for Inference ----------------------
-    @torch.no_grad()
-    def inference_single_image(self, x):
-        # backbone
-        pyramid_feats = self.backbone(x)
-
-        # neck
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # fpn
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # non-shared heads
-        all_cls_preds = []
-        all_box_preds = []
-        all_anchors = []
-        for level, (feat, head) in enumerate(zip(pyramid_feats, self.non_shared_heads)):
-            cls_feat, reg_feat = head(feat)
-
-            # pred
-            cls_pred = self.cls_preds[level](cls_feat)  # [B, C*A, H, W]
-            reg_pred = self.reg_preds[level](reg_feat)  # [B, 4*A, H, W]
-
-            B, _, H, W = cls_pred.size()
-            fmp_size = [H, W]
-            # [M, 2]
-            anchors = self.generate_anchors(level, fmp_size)
-
-            # take batch 0: [C, H, W] -> [H, W, C] -> [M, C]
-            cls_pred = cls_pred[0].permute(1, 2, 0).contiguous().view(-1, self.num_classes)
-            reg_pred = reg_pred[0].permute(1, 2, 0).contiguous().view(-1, 4)
-
-            # decode bbox
-            ctr_pred = anchors[..., :2] + reg_pred[..., :2] * self.stride[level]
-            wh_pred = torch.exp(reg_pred[..., 2:]) * anchors[..., 2:]
-            pred_x1y1 = ctr_pred - wh_pred * 0.5
-            pred_x2y2 = ctr_pred + wh_pred * 0.5
-            box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
-
-            all_cls_preds.append(cls_pred)
-            all_box_preds.append(box_pred)
-            all_anchors.append(anchors)
-
-        # post process
-        bboxes, scores, labels = self.post_process(
-            all_cls_preds, all_box_preds)
-        
-        return bboxes, scores, labels
-
-
-    # ---------------------- Main Process for Training ----------------------
-    def forward(self, x):
-        if not self.trainable:
-            return self.inference_single_image(x)
-        else:
-            # backbone
-            pyramid_feats = self.backbone(x)
-
-            # neck
-            pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-            # fpn
-            pyramid_feats = self.fpn(pyramid_feats)
-
-            # non-shared heads
-            all_fmp_sizes = []
-            all_cls_preds = []
-            all_box_preds = []
-            all_anchors = []
-            for level, (feat, head) in enumerate(zip(pyramid_feats, self.non_shared_heads)):
-                cls_feat, reg_feat = head(feat)
-
-                # pred
-                cls_pred = self.cls_preds[level](cls_feat)  # [B, C*A, H, W]
-                reg_pred = self.reg_preds[level](reg_feat)  # [B, 4*A, H, W]
-
-                B, _, H, W = cls_pred.size()
-                fmp_size = [H, W]
-                # generate anchor boxes: [M, 2]
-                anchors = self.generate_anchors(level, fmp_size)
-                
-                # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
-                cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-                reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4)
-
-                # decode bbox
-                ctr_pred = anchors[..., :2] + reg_pred[..., :2] * self.stride[level]
-                wh_pred = torch.exp(reg_pred[..., 2:]) * anchors[..., 2:]
-                pred_x1y1 = ctr_pred - wh_pred * 0.5
-                pred_x2y2 = ctr_pred + wh_pred * 0.5
-                box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
-
-                all_fmp_sizes.append(fmp_size)
-                all_cls_preds.append(cls_pred)
-                all_box_preds.append(box_pred)
-                all_anchors.append(anchors)
-            
-            # output dict
-            outputs = {"pred_cls": all_cls_preds,        # List(Tensor) [B, M, C]
-                       "pred_box": all_box_preds,        # List(Tensor) [B, M, 4]
-                       "anchors": all_anchors,           # List(Tensor) [B, M, 4]
-                       'fmp_sizes': all_fmp_sizes,       # List
-                       'strides': self.stride,           # List
-                       }
-
-            return outputs 

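Both branches of the deleted detector decode boxes the same way: the regression output shifts the anchor center by `reg * stride` and scales the anchor size by `exp(reg)`. A standalone sketch of that decode step (values are illustrative, not repo code):

```python
# Toy decode matching the ctr/wh formulas in the deleted forward passes.
import torch

stride = 8
anchor = torch.tensor([[12.0, 12.0, 16.0, 32.0]])  # [cx, cy, w, h] for one anchor
reg = torch.tensor([[0.25, -0.50, 0.10, 0.20]])    # raw regression output

ctr = anchor[..., :2] + reg[..., :2] * stride      # shift the anchor center
wh = torch.exp(reg[..., 2:]) * anchor[..., 2:]     # scale the anchor size
box = torch.cat([ctr - wh * 0.5, ctr + wh * 0.5], dim=-1)  # [x1, y1, x2, y2]
print(box)
```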
+ 0 - 154
models/detectors/yolov5_plus/yolov5_plus_backbone.py

@@ -1,154 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .yolov5_plus_basic import Conv, ELAN_CSP_Block
-except:
-    from yolov5_plus_basic import Conv, ELAN_CSP_Block
-
-
-# ---------------------------- ImageNet pretrained weights ----------------------------
-model_urls = {
-    'elan_cspnet_nano': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elan_cspnet_nano.pth",
-    'elan_cspnet_small': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elan_cspnet_small.pth",
-    'elan_cspnet_medium': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elan_cspnet_medium.pth",
-    'elan_cspnet_large': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elan_cspnet_large.pth",
-    'elan_cspnet_huge': None,
-}
-
-
-# ---------------------------- Basic functions ----------------------------
-## ELAN-CSPNet
-class ELAN_CSPNet(nn.Module):
-    def __init__(self, width=1.0, depth=1.0, ratio=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELAN_CSPNet, self).__init__()
-        self.feat_dims = [int(256 * width), int(512 * width), int(512 * width * ratio)]
-        
-        # stride = 2
-        self.layer_1 =  Conv(3, int(64*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type)
-        
-        # stride = 4
-        self.layer_2 = nn.Sequential(
-            Conv(int(64*width), int(128*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            ELAN_CSP_Block(int(128*width), int(128*width), nblocks=int(3*depth), shortcut=True,
-                           act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # stride = 8
-        self.layer_3 = nn.Sequential(
-            Conv(int(128*width), int(256*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            ELAN_CSP_Block(int(256*width), int(256*width), nblocks=int(6*depth), shortcut=True,
-                           act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # stride = 16
-        self.layer_4 = nn.Sequential(
-            Conv(int(256*width), int(512*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            ELAN_CSP_Block(int(512*width), int(512*width), nblocks=int(6*depth), shortcut=True,
-                           act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # stride = 32
-        self.layer_5 = nn.Sequential(
-            Conv(int(512*width), int(512*width*ratio), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            ELAN_CSP_Block(int(512*width*ratio), int(512*width*ratio), nblocks=int(3*depth), shortcut=True,
-                           act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-# ---------------------------- Functions ----------------------------
-## load pretrained weight
-def load_weight(model, model_name):
-    # load weight
-    print('Loading pretrained weight ...')
-    url = model_urls[model_name]
-    if url is not None:
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print(k)
-
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No pretrained for {}'.format(model_name))
-
-    return model
-
-
-## build ELAN-Net
-def build_backbone(cfg, pretrained=False): 
-    # model
-    backbone = ELAN_CSPNet(
-        width=cfg['width'],
-        depth=cfg['depth'],
-        ratio=cfg['ratio'],
-        act_type=cfg['bk_act'],
-        norm_type=cfg['bk_norm'],
-        depthwise=cfg['bk_dpw']
-        )
-    feat_dims = backbone.feat_dims
-        
-    # check whether to load imagenet pretrained weight
-    if pretrained:
-        if cfg['width'] == 0.25 and cfg['depth'] == 0.34 and cfg['ratio'] == 2.0:
-            backbone = load_weight(backbone, model_name='elan_cspnet_nano')
-        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34 and cfg['ratio'] == 2.0:
-            backbone = load_weight(backbone, model_name='elan_cspnet_small')
-        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67 and cfg['ratio'] == 1.5:
-            backbone = load_weight(backbone, model_name='elan_cspnet_medium')
-        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0 and cfg['ratio'] == 1.0:
-            backbone = load_weight(backbone, model_name='elan_cspnet_large')
-        elif cfg['width'] == 1.25 and cfg['depth'] == 1.0 and cfg['ratio'] == 1.0:
-            backbone = load_weight(backbone, model_name='elan_cspnet_huge')
-
-    return backbone, feat_dims
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'ratio': 1.0,
-    }
-    model, feats = build_backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    x = torch.randn(1, 3, 640, 640)
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

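`load_weight` in the file removed above (and its twin added to `yolox_backbone.py` below) keeps only checkpoint tensors whose key and shape match the model. The same pattern in isolation, on a hypothetical tiny model (not repo code):

```python
# Shape-checked checkpoint filtering, as in load_weight.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
ckpt = {'weight': torch.zeros(3, 4),   # wrong shape -> dropped
        'bias': torch.zeros(2),        # key and shape match -> kept
        'extra': torch.zeros(1)}       # unknown key -> dropped

model_sd = model.state_dict()
filtered = {k: v for k, v in ckpt.items()
            if k in model_sd and v.shape == model_sd[k].shape}
model.load_state_dict(filtered, strict=False)  # strict=False tolerates the dropped keys
```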
+ 0 - 163
models/detectors/yolov5_plus/yolov5_plus_basic.py

@@ -1,163 +0,0 @@
-import torch
-import torch.nn as nn
-
-
-class SiLU(nn.Module):
-    """export-friendly version of nn.SiLU()"""
-
-    @staticmethod
-    def forward(x):
-        return x * torch.sigmoid(x)
-
-
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-
-
-# Basic conv layer
-class Conv(nn.Module):
-    def __init__(self, 
-                 c1,                   # in channels
-                 c2,                   # out channels 
-                 k=1,                  # kernel size 
-                 p=0,                  # padding
-                 s=1,                  # stride
-                 d=1,                  # dilation
-                 act_type='silu',      # activation
-                 norm_type='BN',       # normalization
-                 depthwise=False):
-        super(Conv, self).__init__()
-        convs = []
-        add_bias = not norm_type  # a following norm layer makes the conv bias redundant
-        if depthwise:
-            # depthwise conv
-            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c1))
-            if act_type:
-                convs.append(get_activation(act_type))
-            # pointwise conv
-            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-
-        else:
-            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-            
-        self.convs = nn.Sequential(*convs)
-
-
-    def forward(self, x):
-        return self.convs(x)
-
-
-# BottleNeck
-class Bottleneck(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expand_ratio=0.5,
-                 shortcut=False,
-                 depthwise=False,
-                 act_type='silu',
-                 norm_type='BN'):
-        super(Bottleneck, self).__init__()
-        inter_dim = int(out_dim * expand_ratio)  # hidden channels            
-        self.cv1 = Conv(in_dim, inter_dim, k=3, p=1, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
-        self.cv2 = Conv(inter_dim, out_dim, k=3, p=1, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
-        self.shortcut = shortcut and in_dim == out_dim
-
-    def forward(self, x):
-        h = self.cv2(self.cv1(x))
-
-        return x + h if self.shortcut else h
-
-
-# ELAN-CSP-Block
-class ELAN_CSP_Block(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expand_ratio=0.5,
-                 nblocks=1,
-                 shortcut=False,
-                 depthwise=False,
-                 act_type='silu',
-                 norm_type='BN'):
-        super(ELAN_CSP_Block, self).__init__()
-        inter_dim = int(out_dim * expand_ratio)
-        self.cv1 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
-        self.cv2 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
-        self.m = nn.Sequential(*(
-            Bottleneck(inter_dim, inter_dim, 1.0, shortcut, depthwise, act_type, norm_type)
-            for _ in range(nblocks)))
-        self.cv3 = Conv((2 + nblocks) * inter_dim, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        out = [x1, x2]
-
-        out.extend(m(out[-1]) for m in self.m)
-
-        out = self.cv3(torch.cat(out, dim=1))
-
-        return out
-
-
-# ---------------------------- FPN Modules ----------------------------
-## build fpn's core block
-def build_fpn_block(cfg, in_dim, out_dim):
-    if cfg['fpn_core_block'] == 'ELAN_CSPBlock':
-        layer = ELAN_CSP_Block(in_dim=in_dim,
-                               out_dim=out_dim,
-                               expand_ratio=cfg['expand_ratio'],
-                               nblocks=round(3*cfg['depth']),
-                               shortcut=False,
-                               act_type=cfg['fpn_act'],
-                               norm_type=cfg['fpn_norm'],
-                               depthwise=cfg['fpn_depthwise']
-                               )
-        
-    return layer
-
-## build fpn's reduce layer
-def build_reduce_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_reduce_layer'] == 'Conv':
-        layer = Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer
-
-## build fpn's downsample layer
-def build_downsample_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_downsample_layer'] == 'Conv':
-        layer = Conv(in_dim, out_dim, k=3, s=2, p=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer

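The `depthwise=True` path of the deleted `Conv` block factors a dense k×k convolution into a per-channel k×k conv plus a 1×1 pointwise conv. A quick parameter-count comparison with arbitrary dimensions (not repo code):

```python
# Why the depthwise branch in Conv is cheaper.
import torch.nn as nn

c1, c2 = 64, 128
dense = nn.Conv2d(c1, c2, 3, padding=1, bias=False)
separable = nn.Sequential(
    nn.Conv2d(c1, c1, 3, padding=1, groups=c1, bias=False),  # depthwise 3x3
    nn.Conv2d(c1, c2, 1, bias=False),                        # pointwise 1x1
)
count = lambda m: sum(p.numel() for p in m.parameters())
print(count(dense), count(separable))  # 73728 vs. 8768
```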
+ 0 - 73
models/detectors/yolov5_plus/yolov5_plus_head.py

@@ -1,73 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .yolov5_plus_basic import Conv
-
-
-class DecoupledHead(nn.Module):
-    def __init__(self, cfg, in_dim, out_dim, num_classes=80):
-        super().__init__()
-        print('==============================')
-        print('Head: Decoupled Head')
-        self.in_dim = in_dim
-        self.num_cls_head=cfg['num_cls_head']
-        self.num_reg_head=cfg['num_reg_head']
-        self.act_type=cfg['head_act']
-        self.norm_type=cfg['head_norm']
-
-        # cls head
-        cls_feats = []
-        self.cls_out_dim = max(out_dim, num_classes)
-        for i in range(cfg['num_cls_head']):
-            if i == 0:
-                cls_feats.append(
-                    Conv(in_dim, self.cls_out_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
-                        depthwise=cfg['head_depthwise'])
-                        )
-            else:
-                cls_feats.append(
-                    Conv(self.cls_out_dim, self.cls_out_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
-                        depthwise=cfg['head_depthwise'])
-                        )
-        # reg head
-        reg_feats = []
-        self.reg_out_dim = out_dim
-        for i in range(cfg['num_reg_head']):
-            if i == 0:
-                reg_feats.append(
-                    Conv(in_dim, self.reg_out_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
-                        depthwise=cfg['head_depthwise'])
-                        )
-            else:
-                reg_feats.append(
-                    Conv(self.reg_out_dim, self.reg_out_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
-                        depthwise=cfg['head_depthwise'])
-                        )
-
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-
-    def forward(self, x):
-        """
-            x: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-
-# build detection head
-def build_head(cfg, in_dim, max_dim, num_classes=80):
-    head = DecoupledHead(cfg, in_dim, max_dim, num_classes) 
-
-    return head

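The `DecoupledHead` removed above runs two independent conv stacks over the same pyramid feature, one feeding the class predictor and one the box predictor. A minimal sketch of that split with illustrative channel sizes (not repo code):

```python
# The decoupled cls/reg split in miniature.
import torch
import torch.nn as nn

feat = torch.randn(1, 256, 20, 20)  # one FPN level
cls_branch = nn.Sequential(nn.Conv2d(256, 256, 3, padding=1), nn.SiLU())
reg_branch = nn.Sequential(nn.Conv2d(256, 256, 3, padding=1), nn.SiLU())
cls_feat, reg_feat = cls_branch(feat), reg_branch(feat)
print(cls_feat.shape, reg_feat.shape)  # both [1, 256, 20, 20]
```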
+ 0 - 70
models/detectors/yolov5_plus/yolov5_plus_neck.py

@@ -1,70 +0,0 @@
-import torch
-import torch.nn as nn
-from .yolov5_plus_basic import Conv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code is adapted from https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio=0.5):
-        super().__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.MaxPool2d(kernel_size=cfg['pooling_size'], stride=1, padding=cfg['pooling_size'] // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-
-# SPPF block with CSP module
-class SPPFBlockCSP(nn.Module):
-    """
-        CSP Spatial Pyramid Pooling Block
-    """
-    def __init__(self, cfg, in_dim, out_dim):
-        super(SPPFBlockCSP, self).__init__()
-        inter_dim = int(in_dim * cfg['expand_ratio'])
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.Sequential(
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise']),
-            SPPF(cfg, inter_dim, inter_dim, expand_ratio=1.0),
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise'])
-        )
-        self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-
-        
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.m(x2)
-        y = self.cv3(torch.cat([x1, x3], dim=1))
-
-        return y
-
-
-def build_neck(cfg, in_dim, out_dim):
-    model = cfg['neck']
-    print('==============================')
-    print('Neck: {}'.format(model))
-    # build neck
-    if model == 'sppf':
-        neck = SPPF(cfg, in_dim, out_dim, cfg['expand_ratio'])
-    elif model == 'csp_sppf':
-        neck = SPPFBlockCSP(cfg, in_dim, out_dim)
-
-    return neck
-        

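`SPPF` above applies one 5×5 max-pool three times in series; stacked 5×5 pools reproduce the 5/9/13 receptive fields of the classic parallel SPP while reusing intermediate results, which is the point of the "Fast" variant. A quick check (not repo code):

```python
# Successive 5x5 max-pools equal single 9x9 and 13x13 pools (stride 1).
import torch
import torch.nn as nn

x = torch.randn(1, 8, 32, 32)
m5 = nn.MaxPool2d(5, stride=1, padding=2)
y1 = m5(x); y2 = m5(y1); y3 = m5(y2)
assert torch.equal(y2, nn.MaxPool2d(9, stride=1, padding=4)(x))
assert torch.equal(y3, nn.MaxPool2d(13, stride=1, padding=6)(x))
out = torch.cat((x, y1, y2, y3), dim=1)  # what SPPF hands to its final 1x1 conv
```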
+ 0 - 89
models/detectors/yolov5_plus/yolov5_plus_pafpn.py

@@ -1,89 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .yolov5_plus_basic import (Conv, build_downsample_layer, build_fpn_block)
-
-
-# YOLO-Style PaFPN
-class Yolov5PlusPaFPN(nn.Module):
-    def __init__(self, cfg, in_dims=[256, 512, 1024], out_dim=None):
-        super(Yolov5PlusPaFPN, self).__init__()
-        # --------------------------- Basic Parameters ---------------------------
-        self.in_dims = in_dims
-        c3, c4, c5 = in_dims
-        width = cfg['width']
-        ratio = cfg['ratio']
-
-        # --------------------------- Network Parameters ---------------------------
-        ## top down
-        ### P5 -> P4
-        self.top_down_layer_1 = build_fpn_block(cfg, c4 + c5, round(512*width))
-
-        ### P4 -> P3
-        self.top_down_layer_2 = build_fpn_block(cfg, c3 + round(512*width), round(256*width))
-
-        ## bottom up
-        ### P3 -> P4
-        self.downsample_layer_1 = build_downsample_layer(cfg, round(256*width), round(256*width))
-        self.bottom_up_layer_1 = build_fpn_block(cfg, round(256*width) + round(512*width), round(512*width))
-
-        ### P4 -> P5
-        self.downsample_layer_2 = build_downsample_layer(cfg, round(512*width), round(512*width))
-        self.bottom_up_layer_2 = build_fpn_block(cfg, c5 + round(512*width), round(512*width*ratio))
-                
-        ## output proj layers
-        if out_dim is not None:
-            self.out_layers = nn.ModuleList([
-                Conv(in_dim, out_dim, k=1,
-                     act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-                     for in_dim in [round(256*width), round(512*width), round(512*width*ratio)]
-                     ])
-            self.out_dim = [out_dim] * 3
-        else:
-            self.out_layers = None
-            self.out_dim = [round(256*width), round(512*width), round(512*width*ratio)]
-
-
-    def forward(self, features):
-        c3, c4, c5 = features
-
-        # Top down
-        ## P5 -> P4
-        c6 = F.interpolate(c5, scale_factor=2.0)
-        c7 = torch.cat([c6, c4], dim=1)
-        c8 = self.top_down_layer_1(c7)
-        ## P4 -> P3
-        c9 = F.interpolate(c8, scale_factor=2.0)
-        c10 = torch.cat([c9, c3], dim=1)
-        c11 = self.top_down_layer_2(c10)
-
-        # Bottom up
-        ## P3 -> P4
-        c12 = self.downsample_layer_1(c11)
-        c13 = torch.cat([c12, c8], dim=1)
-        c14 = self.bottom_up_layer_1(c13)
-        ## P4 -> P5
-        c15 = self.downsample_layer_2(c14)
-        c16 = torch.cat([c15, c5], dim=1)
-        c17 = self.bottom_up_layer_2(c16)
-
-        out_feats = [c11, c14, c17] # [P3, P4, P5]
-        
-        # output proj layers
-        if self.out_layers is not None:
-            out_feats_proj = []
-            for feat, layer in zip(out_feats, self.out_layers):
-                out_feats_proj.append(layer(feat))
-            return out_feats_proj
-
-        return out_feats
-
-
-def build_fpn(cfg, in_dims, out_dim=None):
-    model = cfg['fpn']
-    # build pafpn
-    if model == 'yolov5_plus_pafpn':
-        fpn_net = Yolov5PlusPaFPN(cfg, in_dims, out_dim)
-
-    return fpn_net

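`Yolov5PlusPaFPN` fuses features top-down by upsampling and concatenating, then bottom-up with strided convs. The first top-down step in shapes, assuming a 640×640 input and the default dims (not repo code):

```python
# Shape of the first top-down fusion in the PaFPN.
import torch
import torch.nn.functional as F

c4 = torch.randn(1, 512, 40, 40)    # stride-16 feature
c5 = torch.randn(1, 1024, 20, 20)   # stride-32 feature

c6 = F.interpolate(c5, scale_factor=2.0)  # [1, 1024, 40, 40]
c7 = torch.cat([c6, c4], dim=1)           # [1, 1536, 40, 40] -> top_down_layer_1
print(c7.shape)
```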
+ 1 - 1
models/detectors/yolox/yolox.py

@@ -2,7 +2,7 @@ import torch
 import torch.nn as nn
 
 from .yolox_backbone import build_backbone
-from .yolox_fpn import build_fpn
+from .yolox_pafpn import build_fpn
 from .yolox_head import build_head
 
 from utils.misc import multiclass_nms

+ 47 - 25
models/detectors/yolox/yolox_backbone.py

@@ -9,7 +9,11 @@ except:
     from yolox_neck import SPPF
 
 model_urls = {
+    "cspdarknet_nano": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_nano.pth",
+    "cspdarknet_small": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_small.pth",
+    "cspdarknet_medium": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_medium.pth",
     "cspdarknet_large": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_large.pth",
+    "cspdarknet_huge": None,
 }
 
 # CSPDarkNet
@@ -42,7 +46,7 @@ class CSPDarkNet(nn.Module):
         # P5
         self.layer_5 = nn.Sequential(
             Conv(int(512*width), int(1024*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            SPPF(int(1024*width), int(1024*width), expand_ratio=0.5, act_type=act_type, norm_type=norm_type),
+            SPPF(int(1024*width), int(1024*width), expand_ratio=0.5),
             CSPBlock(int(1024*width), int(1024*width), expand_ratio=0.5, nblocks=int(3*depth),
                      shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
         )
@@ -61,6 +65,37 @@ class CSPDarkNet(nn.Module):
 
 
 # ---------------------------- Functions ----------------------------
+## load pretrained weight
+def load_weight(model, model_name):
+    # load weight
+    print('Loading pretrained weight ...')
+    url = model_urls[model_name]
+    if url is not None:
+        checkpoint = torch.hub.load_state_dict_from_url(
+            url=url, map_location="cpu", check_hash=True)
+        # checkpoint state dict
+        checkpoint_state_dict = checkpoint.pop("model")
+        # model state dict
+        model_state_dict = model.state_dict()
+        # check
+        for k in list(checkpoint_state_dict.keys()):
+            if k in model_state_dict:
+                shape_model = tuple(model_state_dict[k].shape)
+                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
+                if shape_model != shape_checkpoint:
+                    checkpoint_state_dict.pop(k)
+            else:
+                checkpoint_state_dict.pop(k)
+                print(k)
+
+        model.load_state_dict(checkpoint_state_dict)
+    else:
+        print('No pretrained for {}'.format(model_name))
+
+    return model
+
+
+## build CSPDarkNet
 def build_backbone(cfg, pretrained=False): 
     """Constructs a darknet-53 model.
     Args:
@@ -69,31 +104,18 @@ def build_backbone(cfg, pretrained=False):
     backbone = CSPDarkNet(cfg['depth'], cfg['width'], cfg['bk_act'], cfg['bk_norm'], cfg['bk_dpw'])
     feat_dims = backbone.feat_dims
 
+    # check whether to load imagenet pretrained weight
     if pretrained:
-        if cfg['width'] == 1.0 and cfg['depth'] == 1.0:
-            url = model_urls['cspdarknet_large']
-        if url is not None:
-            print('Loading pretrained weight ...')
-            checkpoint = torch.hub.load_state_dict_from_url(
-                url=url, map_location="cpu", check_hash=True)
-            # checkpoint state dict
-            checkpoint_state_dict = checkpoint.pop("model")
-            # model state dict
-            model_state_dict = backbone.state_dict()
-            # check
-            for k in list(checkpoint_state_dict.keys()):
-                if k in model_state_dict:
-                    shape_model = tuple(model_state_dict[k].shape)
-                    shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                    if shape_model != shape_checkpoint:
-                        checkpoint_state_dict.pop(k)
-                else:
-                    checkpoint_state_dict.pop(k)
-                    print(k)
-
-            backbone.load_state_dict(checkpoint_state_dict)
-        else:
-            print('No backbone pretrained: CSPDarkNet53')        
+        if cfg['width'] == 0.25 and cfg['depth'] == 0.34:
+            backbone = load_weight(backbone, model_name='cspdarknet_nano')
+        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
+            backbone = load_weight(backbone, model_name='cspdarknet_small')
+        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
+            backbone = load_weight(backbone, model_name='cspdarknet_medium')
+        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
+            backbone = load_weight(backbone, model_name='cspdarknet_large')
+        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
+            backbone = load_weight(backbone, model_name='cspdarknet_huge')
 
     return backbone, feat_dims
 

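The new `if/elif` ladder in `build_backbone` maps the config's width/depth pair to a pretrained checkpoint. The same mapping written out as a lookup, with values transcribed from the branches above:

```python
# Width/depth pairs that select each pretrained CSPDarkNet checkpoint.
CSPDARKNET_SCALES = {
    'nano':   dict(width=0.25, depth=0.34),
    'small':  dict(width=0.50, depth=0.34),
    'medium': dict(width=0.75, depth=0.67),
    'large':  dict(width=1.00, depth=1.00),
    'huge':   dict(width=1.25, depth=1.34),  # no pretrained URL yet
}
```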
+ 58 - 7
models/detectors/yolox/yolox_neck.py

@@ -1,10 +1,6 @@
 import torch
 import torch.nn as nn
-
-try:
-    from .yolox_basic import Conv
-except:
-    from yolox_basic import Conv
+from .yolox_basic import Conv
 
 
 # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
@@ -12,7 +8,7 @@ class SPPF(nn.Module):
     """
         This code is adapted from https://github.com/ultralytics/yolov5
     """
-    def __init__(self, in_dim, out_dim, expand_ratio=0.5, pooling_size=5, act_type='', norm_type=''):
+    def __init__(self, in_dim, out_dim, expand_ratio=0.5, pooling_size=5, act_type='silu', norm_type='BN'):
         super().__init__()
         inter_dim = int(in_dim * expand_ratio)
         self.out_dim = out_dim
@@ -28,6 +24,51 @@ class SPPF(nn.Module):
         return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
 
 
+# SPPF block with CSP module
+class SPPFBlockCSP(nn.Module):
+    """
+        CSP Spatial Pyramid Pooling Block
+    """
+    def __init__(self,
+                 in_dim,
+                 out_dim,
+                 expand_ratio=0.5,
+                 pooling_size=5,
+                 act_type='silu',
+                 norm_type='BN',
+                 depthwise=False
+                 ):
+        super(SPPFBlockCSP, self).__init__()
+        inter_dim = int(in_dim * expand_ratio)
+        self.out_dim = out_dim
+        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.m = nn.Sequential(
+            Conv(inter_dim, inter_dim, k=3, p=1, 
+                 act_type=act_type, norm_type=norm_type, 
+                 depthwise=depthwise),
+            SPPF(inter_dim, 
+                 inter_dim, 
+                 expand_ratio=1.0, 
+                 pooling_size=pooling_size, 
+                 act_type=act_type, 
+                 norm_type=norm_type),
+            Conv(inter_dim, inter_dim, k=3, p=1, 
+                 act_type=act_type, norm_type=norm_type, 
+                 depthwise=depthwise)
+        )
+        self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=act_type, norm_type=norm_type)
+
+        
+    def forward(self, x):
+        x1 = self.cv1(x)
+        x2 = self.cv2(x)
+        x3 = self.m(x2)
+        y = self.cv3(torch.cat([x1, x3], dim=1))
+
+        return y
+
+
 def build_neck(cfg, in_dim, out_dim):
     model = cfg['neck']
     print('==============================')
@@ -42,6 +83,16 @@ def build_neck(cfg, in_dim, out_dim):
             act_type=cfg['neck_act'],
             norm_type=cfg['neck_norm']
             )
+    elif model == 'csp_sppf':
+        neck = SPPFBlockCSP(
+            in_dim=in_dim,
+            out_dim=out_dim,
+            expand_ratio=cfg['expand_ratio'], 
+            pooling_size=cfg['pooling_size'],
+            act_type=cfg['neck_act'],
+            norm_type=cfg['neck_norm'],
+            depthwise=cfg['neck_depthwise']
+            )
 
     return neck
-    
+        

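With the `csp_sppf` branch added to `build_neck`, a config can now select the CSP variant. A hypothetical config dict matching the keys the function reads (values are illustrative):

```python
# Hypothetical config exercising the new csp_sppf branch of build_neck.
cfg = {
    'neck': 'csp_sppf',
    'expand_ratio': 0.5,
    'pooling_size': 5,
    'neck_act': 'silu',
    'neck_norm': 'BN',
    'neck_depthwise': False,
}
# neck = build_neck(cfg, in_dim=1024, out_dim=1024)
```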
+ 4 - 4
models/detectors/yolox/yolox_fpn.py → models/detectors/yolox/yolox_pafpn.py

@@ -8,7 +8,7 @@ except:
 
 
 # PaFPN-CSP
-class YoloPaFPN(nn.Module):
+class Yolov5PaFPN(nn.Module):
     def __init__(self, 
                  in_dims=[256, 512, 1024],
                  out_dim=256,
@@ -17,7 +17,7 @@ class YoloPaFPN(nn.Module):
                  act_type='silu',
                  norm_type='BN',
                  depthwise=False):
-        super(YoloPaFPN, self).__init__()
+        super(Yolov5PaFPN, self).__init__()
         self.in_dims = in_dims
         self.out_dim = out_dim
         c3, c4, c5 = in_dims
@@ -130,8 +130,8 @@ class YoloPaFPN(nn.Module):
 def build_fpn(cfg, in_dims, out_dim=None):
     model = cfg['fpn']
     # build neck
-    if model == 'yolo_pafpn':
-        fpn_net = YoloPaFPN(in_dims=in_dims,
+    if model == 'yolov5_pafpn':
+        fpn_net = Yolov5PaFPN(in_dims=in_dims,
                              out_dim=out_dim,
                              width=cfg['width'],
                              depth=cfg['depth'],

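After the rename, configs must request the PaFPN under its new key. A hypothetical snippet showing only the keys visible in `build_fpn` above (remaining keys omitted):

```python
# Hypothetical config reflecting the 'yolo_pafpn' -> 'yolov5_pafpn' rename.
cfg = {'fpn': 'yolov5_pafpn', 'width': 1.0, 'depth': 1.0}  # plus act/norm keys
# fpn_net = build_fpn(cfg, in_dims=[256, 512, 1024], out_dim=256)
```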
+ 2 - 2
train.sh

@@ -3,10 +3,10 @@ python train.py \
         --cuda \
         -d coco \
         --root /mnt/share/ssd2/dataset/ \
-        -m yolov5_plus_n \
+        -m yolox_n \
         -bs 16 \
         -size 640 \
-        --wp_epoch 1 \
+        --wp_epoch 3 \
         --max_epoch 300 \
         --eval_epoch 10 \
         --ema \