
fix an unknown bug in YOLOX

yjh0410 2 years ago
parent
commit
311a9b89b7
45 changed files with 335 additions and 3869 deletions
  1. +4 -22    README.md
  2. +4 -13    config/__init__.py
  3. +0 -139   config/model_config/yolo_free_v1_config.py
  4. +0 -145   config/model_config/yolo_free_v2_config.py
  5. +0 -15    config/model_config/yolov1_config.py
  6. +0 -15    config/model_config/yolov2_config.py
  7. +1 -33    config/model_config/yolov3_config.py
  8. +1 -33    config/model_config/yolov4_config.py
  9. +0 -80    config/model_config/yolov5_config.py
  10. +2 -47   config/model_config/yolov7_config.py
  11. +5 -85   config/model_config/yolox_config.py
  12. +57 -63  engine.py
  13. +3 -13   models/detectors/__init__.py
  14. +0 -39   models/detectors/yolo_free_v1/build.py
  15. +0 -163  models/detectors/yolo_free_v1/loss.py
  16. +0 -188  models/detectors/yolo_free_v1/matcher.py
  17. +0 -181  models/detectors/yolo_free_v1/yolo_free_v1.py
  18. +0 -154  models/detectors/yolo_free_v1/yolo_free_v1_backbone.py
  19. +0 -191  models/detectors/yolo_free_v1/yolo_free_v1_basic.py
  20. +0 -115  models/detectors/yolo_free_v1/yolo_free_v1_head.py
  21. +0 -70   models/detectors/yolo_free_v1/yolo_free_v1_neck.py
  22. +0 -94   models/detectors/yolo_free_v1/yolo_free_v1_pafpn.py
  23. +0 -143  models/detectors/yolo_free_v1/yolo_free_v1_pred.py
  24. +0 -39   models/detectors/yolo_free_v2/build.py
  25. +0 -285  models/detectors/yolo_free_v2/loss.py
  26. +0 -204  models/detectors/yolo_free_v2/matcher.py
  27. +0 -175  models/detectors/yolo_free_v2/yolo_free_v2.py
  28. +0 -154  models/detectors/yolo_free_v2/yolo_free_v2_backbone.py
  29. +0 -169  models/detectors/yolo_free_v2/yolo_free_v2_basic.py
  30. +0 -111  models/detectors/yolo_free_v2/yolo_free_v2_head.py
  31. +0 -70   models/detectors/yolo_free_v2/yolo_free_v2_neck.py
  32. +0 -93   models/detectors/yolo_free_v2/yolo_free_v2_pafpn.py
  33. +0 -159  models/detectors/yolo_free_v2/yolo_free_v2_pred.py
  34. +10 -15  models/detectors/yolov7/loss.py
  35. +39 -58  models/detectors/yolov7/matcher.py
  36. +24 -2   models/detectors/yolox/build.py
  37. +10 -15  models/detectors/yolox/loss.py
  38. +39 -58  models/detectors/yolox/matcher.py
  39. +22 -19  models/detectors/yolox/yolox.py
  40. +45 -17  models/detectors/yolox/yolox_basic.py
  41. +15 -79  models/detectors/yolox/yolox_head.py
  42. +48 -100 models/detectors/yolox/yolox_pafpn.py
  43. +3 -3    train.py
  44. +1 -1    train_ddp.sh
  45. +2 -2    utils/misc.py

+ 4 - 22
README.md

@@ -144,31 +144,13 @@ python train.py --cuda -d coco --root path/to/COCO -m yolov1 -bs 16 --max_epoch
 
 * YOLOv7:
 
-| Model    | Backbone      | Scale | Epoch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|----------|---------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
-| YOLOv7-T | ELANNet-Tiny  |  640  |  300  |         38.0           |       56.8        |   22.6            |   7.9              | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolov7_tiny_coco.pth) |
-| YOLOv7-L | ELANNet-Large |  640  |  300  |         48.0           |       67.5        |   144.6           |   44.0             | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolov7_large_coco.pth) |
+| Model       | Backbone      | Scale | Epoch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
+|-------------|---------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
+| YOLOv7-Tiny | ELANNet-Tiny  |  640  |  300  |         38.0           |       56.8        |   22.6            |   7.9              | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolov7_tiny_coco.pth) |
+| YOLOv7      | ELANNet-Large |  640  |  300  |         48.0           |       67.5        |   144.6           |   44.0             | [ckpt](https://github.com/yjh0410/PyTorch_YOLO_Tutorial/releases/download/yolo_tutorial_ckpt/yolov7_large_coco.pth) |
 
 *While YOLOv7 incorporates several technical details, such as anchor boxes, SimOTA, AuxiliaryHead, and RepConv, I found it too challenging to fully reproduce. Instead, I created a simpler version of YOLOv7 using an anchor-free structure and SimOTA (sketched below, after this README diff). As a result, my reproduction performs worse due to the absence of the other technical details. However, since it was only intended as a tutorial, I am not too concerned about this gap.*
 
-* FreeYOLOv1 (My YOLO):
-
-| Model        | Scale | Epoch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|--------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
-| FreeYOLOv1-N |  640  |  300  |                        |                   |                   |                    |  |
-| FreeYOLOv1-S |  640  |  300  |                        |                   |                   |                    |  |
-| FreeYOLOv1-M |  640  |  300  |                        |                   |                   |                    |  |
-| FreeYOLOv1-L |  640  |  300  |                        |                   |                   |                    |  |
-
-* FreeYOLOv2 (My YOLOv2):
-
-| Model        | Scale | Epoch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|--------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
-| FreeYOLOv2-N |  640  |  300  |                        |                   |                   |                    |  |
-| FreeYOLOv2-S |  640  |  300  |                        |                   |                   |                    |  |
-| FreeYOLOv2-M |  640  |  300  |                        |                   |                   |                    |  |
-| FreeYOLOv2-L |  640  |  300  |                        |                   |                   |                    |  |
-
 * Redesigned RT-DETR:
 
 | Model     | Scale | Epoch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |

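The note above leans on SimOTA for label assignment, and this commit also rewrites the SimOTA matchers (models/detectors/yolov7/matcher.py and models/detectors/yolox/matcher.py), so a minimal sketch of the dynamic-k matching step may help. The standalone function, tensor names, and shapes below are illustrative assumptions, not the repo's implementation.

import torch

def simota_assign(cost, pair_wise_ious, topk_candidate=10):
    # cost: [num_gt, num_anchors] matching cost (cls term + weighted IoU term)
    # pair_wise_ious: [num_gt, num_anchors] IoU between each gt box and each prediction
    num_gt, num_anchors = cost.shape
    matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
    # dynamic k: each gt claims roughly as many anchors as the sum of its top IoUs
    n_candidate = min(topk_candidate, num_anchors)
    topk_ious, _ = torch.topk(pair_wise_ious, n_candidate, dim=1)
    dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
    for gt_idx in range(num_gt):
        # take the k cheapest anchors for this gt
        _, pos_idx = torch.topk(cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)
        matching_matrix[gt_idx][pos_idx] = 1
    # an anchor matched to several gts keeps only its cheapest match
    anchor_matching_gt = matching_matrix.sum(0)
    if (anchor_matching_gt > 1).any():
        multiple = anchor_matching_gt > 1
        cost_argmin = torch.min(cost[:, multiple], dim=0).indices
        matching_matrix[:, multiple] = 0
        matching_matrix[cost_argmin, multiple] = 1
    return matching_matrix

cost, ious = torch.rand(3, 12), torch.rand(3, 12)
print(simota_assign(cost, ious).sum().item(), 'anchors assigned')

The dynamic k lets large, well-covered objects claim more positive anchors than small or occluded ones, which is the main idea the paragraph above alludes to.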
+ 4 - 13
config/__init__.py

@@ -74,7 +74,7 @@ def build_trans_config(trans_config='ssd'):
 
 
 # ------------------ Model Config ----------------------
-## Reproducrd YOLO
+## YOLO series
 from .model_config.yolov1_config import yolov1_cfg
 from .model_config.yolov2_config import yolov2_cfg
 from .model_config.yolov3_config import yolov3_cfg
@@ -82,9 +82,6 @@ from .model_config.yolov4_config import yolov4_cfg
 from .model_config.yolov5_config import yolov5_cfg
 from .model_config.yolov7_config import yolov7_cfg
 from .model_config.yolox_config import yolox_cfg
-## My YOLO
-from .model_config.yolo_free_v1_config import yolo_free_v1_cfg
-from .model_config.yolo_free_v2_config import yolo_free_v2_cfg
 ## Real-Time DETR
 from .model_config.rtdetr_config import rtdetr_cfg
 
@@ -99,26 +96,20 @@ def build_model_config(args):
     elif args.model == 'yolov2':
         cfg = yolov2_cfg
     # YOLOv3
-    elif args.model in ['yolov3', 'yolov3_t']:
+    elif args.model in ['yolov3', 'yolov3_tiny']:
         cfg = yolov3_cfg[args.model]
     # YOLOv4
-    elif args.model in ['yolov4', 'yolov4_t']:
+    elif args.model in ['yolov4', 'yolov4_tiny']:
         cfg = yolov4_cfg[args.model]
     # YOLOv5
     elif args.model in ['yolov5_n', 'yolov5_s', 'yolov5_m', 'yolov5_l', 'yolov5_x']:
         cfg = yolov5_cfg[args.model]
     # YOLOv7
-    elif args.model in ['yolov7_t', 'yolov7_l', 'yolov7_x']:
+    elif args.model in ['yolov7_tiny', 'yolov7', 'yolov7_x']:
         cfg = yolov7_cfg[args.model]
     # YOLOX
     elif args.model in ['yolox_n', 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']:
         cfg = yolox_cfg[args.model]
-    # FreeYOLOv1
-    elif args.model in ['yolo_free_v1_n', 'yolo_free_v1_s', 'yolo_free_v1_m', 'yolo_free_v1_l', 'yolo_free_v1_x']:
-        cfg = yolo_free_v1_cfg[args.model]
-    # FreeYOLOv2
-    elif args.model in ['yolo_free_v2_n', 'yolo_free_v2_s', 'yolo_free_v2_m', 'yolo_free_v2_l', 'yolo_free_v2_x']:
-        cfg = yolo_free_v2_cfg[args.model]
     # RT-DETR
     elif args.model in ['rtdetr_n', 'rtdetr_s', 'rtdetr_m', 'rtdetr_l', 'rtdetr_x']:
         cfg = rtdetr_cfg[args.model]

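As a quick illustration of the renamed keys, here is a hypothetical snippet showing how a model string now resolves through build_model_config; SimpleNamespace stands in for the parsed command-line arguments.

from types import SimpleNamespace
from config import build_model_config  # the package __init__ edited above

# 'yolov3_t', 'yolov4_t', and 'yolov7_t' are gone; the new keys are 'yolov3_tiny',
# 'yolov4_tiny', and 'yolov7_tiny' (and plain 'yolov7' replaces 'yolov7_l').
args = SimpleNamespace(model='yolov7_tiny')
cfg = build_model_config(args)   # -> yolov7_cfg['yolov7_tiny']
print(cfg['trans_type'])         # 'yolov5_nano', per the YOLOv7 config diff below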
+ 0 - 139
config/model_config/yolo_free_v1_config.py

@@ -1,139 +0,0 @@
-# FreeYOLOv1 Config
-
-
-yolo_free_v1_cfg = {
-    'yolo_free_v1_n':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'yolo_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolox_nano',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        # ---------------- Train config ----------------
-        ## Training configuration
-        'no_aug_epoch': 20,
-        'trainer_type': 'rtmdet',
-        ## Optimizer
-        'optimizer': 'adamw',      # optional: sgd, adam, adamw
-        'momentum': None,          # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-2,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 35,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9998,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.001,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-    'yolo_free_v1_l':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'yolo_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.25],   # 320 -> 800
-        'trans_type': 'yolox_large',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': {'center_sampling_radius': 2.5,
-                    'topk_candicate': 10},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'loss_obj_weight': 1.0,
-        'loss_cls_weight': 1.0,
-        'loss_box_weight': 5.0,
-        # ---------------- Train config ----------------
-        ## Training configuration
-        'no_aug_epoch': 20,
-        'trainer_type': 'rtmdet',
-        ## Optimizer
-        'optimizer': 'adamw',      # optional: sgd, adam, adamw
-        'momentum': None,          # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-2,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 35,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9998,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.001,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-}

+ 0 - 145
config/model_config/yolo_free_v2_config.py

@@ -1,145 +0,0 @@
-# FreeYOLOv2 Config
-
-
-yolo_free_v2_cfg = {
-    'yolo_free_v2_n':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elan_cspnet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'ratio': 2.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'csp_sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'yolo_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'elan_cspblock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolox_nano',
-        # ---------------- Assignment config ----------------
-        'matcher': {'topk': 10,
-                    'alpha': 0.5,
-                    'beta': 6.0},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'cls_loss': 'bce',   # optional: vfl
-        'loss_cls_weight': 0.5,
-        'loss_iou_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        ## Training configuration
-        'no_aug_epoch': 20,
-        'trainer_type': 'rtmdet',
-        ## Optimizer
-        'optimizer': 'adamw',      # optional: sgd, adam, adamw
-        'momentum': None,          # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-2,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 35,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9998,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.001,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-    'yolo_free_v2_l':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elan_cspnet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'ratio': 1.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'csp_sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'yolo_pafpn',
-        'fpn_reduce_layer': 'Conv',
-        'fpn_downsample_layer': 'Conv',
-        'fpn_core_block': 'elan_cspblock',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        'reg_max': 16,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.25],   # 320 -> 800
-        'trans_type': 'yolox_large',
-        # ---------------- Assignment config ----------------
-        'matcher': {'topk': 10,
-                    'alpha': 0.5,
-                    'beta': 6.0},
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'cls_loss': 'bce',   # optional: vfl
-        'loss_cls_weight': 0.5,
-        'loss_iou_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        ## Training configuration
-        'no_aug_epoch': 20,
-        'trainer_type': 'rtmdet',
-        ## Optimizer
-        'optimizer': 'adamw',      # optional: sgd, adam, adamw
-        'momentum': None,          # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-2,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 35,           # SGD: 10.0;     AdamW: -1
-        ## Model EMA
-        'ema_decay': 0.9998,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## LR schedule
-        'scheduler': 'linear',
-        'lr0': 0.001,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
-    },
-
-}

+ 0 - 15
config/model_config/yolov1_config.py

@@ -28,20 +28,5 @@ yolov1_cfg = {
     'loss_cls_weight': 1.0,
     'loss_box_weight': 5.0,
     # training configuration
-    'no_aug_epoch': -1,
     'trainer_type': 'yolo',
-    # optimizer
-    'optimizer': 'sgd',        # optional: sgd, adam, adamw
-    'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-    'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-    'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-    # model EMA
-    'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-    'ema_tau': 2000,
-    # lr schedule
-    'scheduler': 'linear',
-    'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.004
-    'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
-    'warmup_momentum': 0.8,
-    'warmup_bias_lr': 0.1,
 }
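The optimizer, EMA, and LR-schedule keys deleted here (and from the other per-model configs below) are not lost; they move into the trainers in engine.py. For models that keep trainer_type 'yolo', the effective defaults become the following. The dict itself is illustrative, with values copied from the YoloTrainer diff further down.

# Hard-coded training defaults now owned by YoloTrainer (see the engine.py diff below),
# mirroring the keys just removed from the per-model configs.
yolo_trainer_defaults = {
    'no_aug_epoch': 20, 'clip_grad': 10,
    'optimizer': 'sgd', 'momentum': 0.937, 'weight_decay': 5e-4, 'lr0': 0.01,
    'ema_decay': 0.9999, 'ema_tau': 2000,
    'scheduler': 'linear', 'lrf': 0.01,
    'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1,
}

One nuance worth noting: yolov1 and yolov2 previously set no_aug_epoch to -1, whereas the consolidated trainer hard-codes 20 for every 'yolo'-type model.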

+ 0 - 15
config/model_config/yolov2_config.py

@@ -35,20 +35,5 @@ yolov2_cfg = {
     'loss_cls_weight': 1.0,
     'loss_box_weight': 5.0,
     # training configuration
-    'no_aug_epoch': -1,
     'trainer_type': 'yolo',
-    # optimizer
-    'optimizer': 'sgd',        # optional: sgd, adam, adamw
-    'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-    'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-    'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-    # model EMA
-    'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-    'ema_tau': 2000,
-    # lr schedule
-    'scheduler': 'linear',
-    'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.004
-    'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
-    'warmup_momentum': 0.8,
-    'warmup_bias_lr': 0.1,
 }

+ 1 - 33
config/model_config/yolov3_config.py

@@ -45,26 +45,10 @@ yolov3_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 10,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
-    'yolov3_t':{
+    'yolov3_tiny':{
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'darknet_tiny',
@@ -108,23 +92,7 @@ yolov3_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 10,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
 }

+ 1 - 33
config/model_config/yolov4_config.py

@@ -45,26 +45,10 @@ yolov4_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 10,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
-    'yolov4_t':{
+    'yolov4_tiny':{
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet_tiny',
@@ -108,23 +92,7 @@ yolov4_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 10,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
 }

+ 0 - 80
config/model_config/yolov5_config.py

@@ -44,23 +44,7 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
     'yolov5_s':{
@@ -106,23 +90,7 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
     'yolov5_m':{
@@ -168,23 +136,7 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
     'yolov5_l':{
@@ -230,23 +182,7 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
     'yolov5_x':{
@@ -292,23 +228,7 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
         'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
 }

+ 2 - 47
config/model_config/yolov7_config.py

@@ -1,7 +1,7 @@
 # YOLOv7 Config
 
 yolov7_cfg = {
-    'yolov7_t':{
+    'yolov7_tiny':{
         # input
         'trans_type': 'yolov5_nano',
         'multi_scale': [0.5, 1.5], # 320 -> 960
@@ -43,25 +43,10 @@ yolov7_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # training configuration
-        'no_aug_epoch': 20,
         'trainer_type': 'yolo',
-        # optimizer
-        'optimizer': 'sgd',        # optional: sgd, adam, adamw
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        # model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        # lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
-    'yolov7_l':{
+    'yolov7':{
         # input
         'trans_type': 'yolov5_large',
         'multi_scale': [0.5, 1.25], # 320 -> 800
@@ -103,22 +88,7 @@ yolov7_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # training configuration
-        'no_aug_epoch': 20,
         'trainer_type': 'yolo',
-        # optimizer
-        'optimizer': 'sgd',        # optional: sgd, adam, adamw
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        # model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        # lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
     'yolov7_x':{
@@ -163,22 +133,7 @@ yolov7_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # training configuration
-        'no_aug_epoch': 20,
         'trainer_type': 'yolo',
-        # optimizer
-        'optimizer': 'sgd',        # optional: sgd, adam, adamw
-        'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        # model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        # lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
     },
 
 }

+ 5 - 85
config/model_config/yolox_config.py

@@ -43,23 +43,7 @@ yolox_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
-        'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.9,           # SGD: 0.9;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
+        'trainer_type': 'rtmdet',
     },
 
     'yolox_s':{
@@ -103,23 +87,7 @@ yolox_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
-        'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.9,           # SGD: 0.9;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
+        'trainer_type': 'rtmdet',
     },
 
     'yolox_m':{
@@ -163,23 +131,7 @@ yolox_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
-        'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.9,           # SGD: 0.9;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
+        'trainer_type': 'rtmdet',
     },
 
     'yolox_l':{
@@ -223,23 +175,7 @@ yolox_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
-        'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.9,           # SGD: 0.9;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
+        'trainer_type': 'rtmdet',
     },
 
     'yolox_x':{
@@ -283,23 +219,7 @@ yolox_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        ## close strong augmentation
-        'no_aug_epoch': 20,
-        'trainer_type': 'yolo',
-        ## optimizer
-        'optimizer': 'sgd',        # optional: sgd, AdamW
-        'momentum': 0.9,           # SGD: 0.9;    AdamW: None
-        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
-        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
-        ## model EMA
-        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
-        'ema_tau': 2000,
-        ## lr schedule
-        'scheduler': 'linear',
-        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
-        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
-        'warmup_momentum': 0.8,
-        'warmup_bias_lr': 0.1,
+        'trainer_type': 'rtmdet',
     },
 
 }
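Besides dropping the same training keys as the configs above, every YOLOX entry switches trainer_type from 'yolo' to 'rtmdet', so YOLOX now trains under RTMTrainer's AdamW recipe instead of the SGD one. Roughly, the effective settings become the following; the dict is illustrative, with values copied from the RTMTrainer diff below.

# Training defaults YOLOX now inherits from RTMTrainer (see the engine.py diff below).
rtm_trainer_defaults = {
    'optimizer': 'adamw', 'momentum': None, 'weight_decay': 5e-2,
    'lr0': 0.001,                 # additionally scaled in RTMTrainer: lr0 *= batch_size / 64
    'ema_decay': 0.9998, 'ema_tau': 2000,
    'scheduler': 'cosine', 'lrf': 0.01,
    'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1,
}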

+ 57 - 63
engine.py

@@ -24,7 +24,7 @@ from dataset.build import build_dataset, build_transform
 
 # Trainer modeled after YOLOv8
 class YoloTrainer(object):
-    def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion):
+    def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
         # ------------------- basic parameters -------------------
         self.args = args
         self.epoch = 0
@@ -32,7 +32,14 @@ class YoloTrainer(object):
         self.last_opt_step = 0
         self.device = device
         self.criterion = criterion
+        self.world_size = world_size
         self.heavy_eval = False
+        self.no_aug_epoch = 20
+        self.clip_grad = 10
+        self.optimizer_dict = {'optimizer': 'sgd', 'momentum': 0.937, 'weight_decay': 5e-4, 'lr0': 0.01}
+        self.ema_dict = {'ema_decay': 0.9999, 'ema_tau': 2000}
+        self.lr_schedule_dict = {'scheduler': 'linear', 'lrf': 0.01}
+        self.warmup_dict = {'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1}        
 
         # ---------------------------- Build Dataset & Model & Trans. Config ----------------------------
         self.data_cfg = data_cfg
@@ -41,14 +48,13 @@ class YoloTrainer(object):
 
         # ---------------------------- Build Transform ----------------------------
         self.train_transform, self.trans_cfg = build_transform(
-            args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
+            args=args, trans_config=self.trans_cfg, max_stride=model_cfg['max_stride'], is_train=True)
         self.val_transform, _ = build_transform(
-            args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=False)
+            args=args, trans_config=self.trans_cfg, max_stride=model_cfg['max_stride'], is_train=False)
 
         # ---------------------------- Build Dataset & Dataloader ----------------------------
         self.dataset, self.dataset_info = build_dataset(self.args, self.data_cfg, self.trans_cfg, self.train_transform, is_train=True)
-        world_size = distributed_utils.get_world_size()
-        self.train_loader = build_dataloader(self.args, self.dataset, self.args.batch_size // world_size, CollateFunc())
+        self.train_loader = build_dataloader(self.args, self.dataset, self.args.batch_size // self.world_size, CollateFunc())
 
         # ---------------------------- Build Evaluator ----------------------------
         self.evaluator = build_evluator(self.args, self.data_cfg, self.val_transform, self.device)
@@ -58,12 +64,11 @@ class YoloTrainer(object):
 
         # ---------------------------- Build Optimizer ----------------------------
         accumulate = max(1, round(64 / self.args.batch_size))
-        self.model_cfg['weight_decay'] *= self.args.batch_size * accumulate / 64
-        self.optimizer, self.start_epoch = build_yolo_optimizer(self.model_cfg, model, self.args.resume)
+        self.optimizer_dict['weight_decay'] *= self.args.batch_size * accumulate / 64
+        self.optimizer, self.start_epoch = build_yolo_optimizer(self.optimizer_dict, model, self.args.resume)
 
         # ---------------------------- Build LR Scheduler ----------------------------
-        self.args.max_epoch += self.args.wp_epoch
-        self.lr_scheduler, self.lf = build_lr_scheduler(self.model_cfg, self.optimizer, self.args.max_epoch)
+        self.lr_scheduler, self.lf = build_lr_scheduler(self.lr_schedule_dict, self.optimizer, self.args.max_epoch)
         self.lr_scheduler.last_epoch = self.start_epoch - 1  # do not move
         if self.args.resume:
             self.lr_scheduler.step()
@@ -71,11 +76,7 @@ class YoloTrainer(object):
         # ---------------------------- Build Model-EMA ----------------------------
         if self.args.ema and distributed_utils.get_rank() in [-1, 0]:
             print('Build ModelEMA ...')
-            self.model_ema = ModelEMA(
-                model,
-                self.model_cfg['ema_decay'],
-                self.model_cfg['ema_tau'],
-                self.start_epoch * len(self.train_loader))
+            self.model_ema = ModelEMA(self.ema_dict, model, self.start_epoch * len(self.train_loader))
         else:
             self.model_ema = None
 
@@ -86,7 +87,7 @@ class YoloTrainer(object):
                 self.train_loader.batch_sampler.sampler.set_epoch(epoch)
 
             # check second stage
-            if epoch >= (self.args.max_epoch - self.model_cfg['no_aug_epoch'] - 1):
+            if epoch >= (self.args.max_epoch - self.no_aug_epoch - 1):
                 # close mosaic augmentation
                 if self.train_loader.dataset.mosaic_prob > 0.:
                     print('close Mosaic Augmentation ...')
@@ -176,7 +177,7 @@ class YoloTrainer(object):
         nw = epoch_size * self.args.wp_epoch
        accumulate = max(1, round(64 / self.args.batch_size))
 
-        # Train one epoch
+        # train one epoch
         for iter_i, (images, targets) in enumerate(self.train_loader):
             ni = iter_i + self.epoch * epoch_size
             # Warmup
@@ -186,11 +187,11 @@ class YoloTrainer(object):
                 for j, x in enumerate(self.optimizer.param_groups):
                     # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                     x['lr'] = np.interp(
-                        ni, xi, [self.model_cfg['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * self.lf(self.epoch)])
+                        ni, xi, [self.warmup_dict['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * self.lf(self.epoch)])
                     if 'momentum' in x:
-                        x['momentum'] = np.interp(ni, xi, [self.model_cfg['warmup_momentum'], self.model_cfg['momentum']])
+                        x['momentum'] = np.interp(ni, xi, [self.warmup_dict['warmup_momentum'], self.optimizer_dict['momentum']])
                                 
-            # To device
+            # to device
             images = images.to(self.device, non_blocking=True).float() / 255.
 
             # Multi scale
@@ -200,34 +201,34 @@ class YoloTrainer(object):
             else:
                 targets = self.refine_targets(targets, self.args.min_box_size)
                 
-            # Visualize train targets
+            # visualize train targets
             if self.args.vis_tgt:
                 vis_data(images*255, targets)
 
-            # Inference
+            # inference
             with torch.cuda.amp.autocast(enabled=self.args.fp16):
                 outputs = model(images)
-                # Compute loss
+                # loss
                 loss_dict = self.criterion(outputs=outputs, targets=targets)
                 losses = loss_dict['losses']
                 losses *= images.shape[0]  # loss * bs
 
+                # reduce            
                 loss_dict_reduced = distributed_utils.reduce_dict(loss_dict)
 
-                if self.args.distributed:
-                    # gradient averaged between devices in DDP mode
-                    losses *= distributed_utils.get_world_size()
+                # gradient averaged between devices in DDP mode
+                losses *= distributed_utils.get_world_size()
 
-            # Backward
+            # backward
             self.scaler.scale(losses).backward()
 
             # Optimize
             if ni - self.last_opt_step >= accumulate:
-                if self.model_cfg['clip_grad'] > 0:
+                if self.clip_grad > 0:
                     # unscale gradients
                     self.scaler.unscale_(self.optimizer)
                     # clip gradients
-                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=self.model_cfg['clip_grad'])
+                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=self.clip_grad)
                 # optimizer.step
                 self.scaler.step(self.optimizer)
                 self.scaler.update()
@@ -237,7 +238,7 @@ class YoloTrainer(object):
                     self.model_ema.update(model)
                 self.last_opt_step = ni
 
-            # Logs
+            # display
             if distributed_utils.is_main_process() and iter_i % 10 == 0:
                 t1 = time.time()
                 cur_lr = [param_group['lr']  for param_group in self.optimizer.param_groups]
@@ -247,11 +248,7 @@ class YoloTrainer(object):
                 log += '[lr: {:.6f}]'.format(cur_lr[2])
                 # loss info
                 for k in loss_dict_reduced.keys():
-                    if k == 'losses' and self.args.distributed:
-                        world_size = distributed_utils.get_world_size()
-                        log += '[{}: {:.2f}]'.format(k, loss_dict[k] / world_size)
-                    else:
-                        log += '[{}: {:.2f}]'.format(k, loss_dict[k])
+                    log += '[{}: {:.2f}]'.format(k, loss_dict_reduced[k])
 
                 # other info
                 log += '[time: {:.2f}]'.format(t1 - t0)
@@ -262,7 +259,6 @@ class YoloTrainer(object):
                 
                 t0 = time.time()
         
-        # LR Schedule
         self.lr_scheduler.step()
         
 
@@ -323,14 +319,19 @@ class YoloTrainer(object):
 
 # Trainer modeled after RTMDet
 class RTMTrainer(object):
-    def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion):
+    def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
         # ------------------- basic parameters -------------------
         self.args = args
         self.epoch = 0
         self.best_map = -1.
         self.device = device
         self.criterion = criterion
+        self.world_size = world_size
         self.heavy_eval = False
+        self.optimizer_dict = {'optimizer': 'adamw', 'momentum': None, 'weight_decay': 5e-2, 'lr0': 0.001}
+        self.ema_dict = {'ema_decay': 0.9998, 'ema_tau': 2000}
+        self.lr_schedule_dict = {'scheduler': 'cosine', 'lrf': 0.01}
+        self.warmup_dict = {'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1}        
 
         # ---------------------------- Build Dataset & Model & Trans. Config ----------------------------
         self.data_cfg = data_cfg
@@ -345,8 +346,7 @@ class RTMTrainer(object):
 
         # ---------------------------- Build Dataset & Dataloader ----------------------------
         self.dataset, self.dataset_info = build_dataset(self.args, self.data_cfg, self.trans_cfg, self.train_transform, is_train=True)
-        world_size = distributed_utils.get_world_size()
-        self.train_loader = build_dataloader(self.args, self.dataset, self.args.batch_size // world_size, CollateFunc())
+        self.train_loader = build_dataloader(self.args, self.dataset, self.args.batch_size // self.world_size, CollateFunc())
 
         # ---------------------------- Build Evaluator ----------------------------
         self.evaluator = build_evluator(self.args, self.data_cfg, self.val_transform, self.device)
@@ -355,12 +355,11 @@ class RTMTrainer(object):
         self.scaler = torch.cuda.amp.GradScaler(enabled=self.args.fp16)
 
         # ---------------------------- Build Optimizer ----------------------------
-        self.model_cfg['lr0'] *= self.args.batch_size / 64
-        self.optimizer, self.start_epoch = build_yolo_optimizer(self.model_cfg, model, self.args.resume)
+        self.optimizer_dict['lr0'] *= self.args.batch_size / 64
+        self.optimizer, self.start_epoch = build_yolo_optimizer(self.optimizer_dict, model, self.args.resume)
 
         # ---------------------------- Build LR Scheduler ----------------------------
-        self.args.max_epoch += self.args.wp_epoch
-        self.lr_scheduler, self.lf = build_lr_scheduler(self.model_cfg, self.optimizer, self.args.max_epoch)
+        self.lr_scheduler, self.lf = build_lr_scheduler(self.lr_schedule_dict, self.optimizer, self.args.max_epoch)
         self.lr_scheduler.last_epoch = self.start_epoch - 1  # do not move
         if self.args.resume:
             self.lr_scheduler.step()
@@ -368,11 +367,7 @@ class RTMTrainer(object):
         # ---------------------------- Build Model-EMA ----------------------------
         if self.args.ema and distributed_utils.get_rank() in [-1, 0]:
             print('Build ModelEMA ...')
-            self.model_ema = ModelEMA(
-                model,
-                self.model_cfg['ema_decay'],
-                self.model_cfg['ema_tau'],
-                self.start_epoch * len(self.train_loader))
+            self.model_ema = ModelEMA(self.ema_dict, model, self.start_epoch * len(self.train_loader))
         else:
             self.model_ema = None
 
@@ -607,7 +602,7 @@ class RTMTrainer(object):
 
 # Trainer for DETR
 class DetrTrainer(object):
-    def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion):
+    def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
         # ------------------- basic parameters -------------------
         self.args = args
         self.epoch = 0
@@ -615,7 +610,12 @@ class DetrTrainer(object):
         self.last_opt_step = 0
         self.device = device
         self.criterion = criterion
+        self.world_size = world_size
         self.heavy_eval = False
+        self.optimizer_dict = {'optimizer': 'adamw', 'momentum': None, 'weight_decay': 1e-4, 'lr0': 0.0001}
+        self.ema_dict = {'ema_decay': 0.9998, 'ema_tau': 2000}
+        self.lr_schedule_dict = {'scheduler': 'linear', 'lrf': 0.1}
+        self.warmup_dict = {'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1}        
 
         # ---------------------------- Build Dataset & Model & Trans. Config ----------------------------
         self.data_cfg = data_cfg
@@ -630,8 +630,7 @@ class DetrTrainer(object):
 
         # ---------------------------- Build Dataset & Dataloader ----------------------------
         self.dataset, self.dataset_info = build_dataset(self.args, self.data_cfg, self.trans_cfg, self.train_transform, is_train=True)
-        world_size = distributed_utils.get_world_size()
-        self.train_loader = build_dataloader(self.args, self.dataset, self.args.batch_size // world_size, CollateFunc())
+        self.train_loader = build_dataloader(self.args, self.dataset, self.args.batch_size // self.world_size, CollateFunc())
 
         # ---------------------------- Build Evaluator ----------------------------
         self.evaluator = build_evluator(self.args, self.data_cfg, self.val_transform, self.device)
@@ -640,12 +639,11 @@ class DetrTrainer(object):
         self.scaler = torch.cuda.amp.GradScaler(enabled=self.args.fp16)
 
         # ---------------------------- Build Optimizer ----------------------------
-        self.model_cfg['lr0'] *= self.args.batch_size / 16.
-        self.optimizer, self.start_epoch = build_detr_optimizer(model_cfg, model, self.args.resume)
+        self.optimizer_dict['lr0'] *= self.args.batch_size / 16.
+        self.optimizer, self.start_epoch = build_detr_optimizer(self.optimizer_dict, model, self.args.resume)
 
         # ---------------------------- Build LR Scheduler ----------------------------
-        self.args.max_epoch += self.args.wp_epoch
-        self.lr_scheduler, self.lf = build_lr_scheduler(self.model_cfg, self.optimizer, self.args.max_epoch)
+        self.lr_scheduler, self.lf = build_lr_scheduler(self.lr_schedule_dict, self.optimizer, self.args.max_epoch)
         self.lr_scheduler.last_epoch = self.start_epoch - 1  # do not move
         if self.args.resume:
             self.lr_scheduler.step()
@@ -653,11 +651,7 @@ class DetrTrainer(object):
         # ---------------------------- Build Model-EMA ----------------------------
         if self.args.ema and distributed_utils.get_rank() in [-1, 0]:
             print('Build ModelEMA ...')
-            self.model_ema = ModelEMA(
-                model,
-                self.model_cfg['ema_decay'],
-                self.model_cfg['ema_tau'],
-                self.start_epoch * len(self.train_loader))
+            self.model_ema = ModelEMA(self.ema_dict, model, self.start_epoch * len(self.train_loader))
         else:
             self.model_ema = None
 
@@ -910,13 +904,13 @@ class DetrTrainer(object):
 
 
 # Build Trainer
-def build_trainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion):
+def build_trainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
     if model_cfg['trainer_type'] == 'yolo':
-        return YoloTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion)
+        return YoloTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size)
     elif model_cfg['trainer_type'] == 'rtmdet':
-        return RTMTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion)
+        return RTMTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size)
     elif model_cfg['trainer_type'] == 'detr':
-        return DetrTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion)
+        return DetrTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size)
     else:
         raise NotImplementedError
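Two details of the trainer code above are easy to misread. First, the warmup block: np.interp linearly ramps each parameter group's learning rate over the first nw iterations, with the bias group falling from warmup_bias_lr while all other groups rise from zero. A standalone sketch with made-up numbers, assuming the YoloTrainer defaults:

import numpy as np

nw = 500                       # warmup iterations (epoch_size * wp_epoch)
lf = lambda epoch: 1.0         # stand-in for the scheduler's lr lambda
warmup_bias_lr, lr0 = 0.1, 0.01

for ni in (0, 250, 500):
    xi = [0, nw]
    bias_lr  = np.interp(ni, xi, [warmup_bias_lr, lr0 * lf(0)])  # falls 0.1 -> 0.01
    other_lr = np.interp(ni, xi, [0.0, lr0 * lf(0)])             # rises 0.0 -> 0.01
    print(ni, round(float(bias_lr), 4), round(float(other_lr), 4))

Second, the DDP loss scaling: losses *= distributed_utils.get_world_size() is now applied unconditionally. Assuming get_world_size() returns 1 outside distributed mode, as is conventional, single-GPU behavior is unchanged, while under DDP the multiplication still compensates for the gradient averaging DDP performs. Similarly, for the gradient-accumulation arithmetic: with batch_size=16, accumulate = round(64/16) = 4, so YoloTrainer rescales weight decay by 16 * 4 / 64 = 1.0, leaving it unchanged at the reference effective batch of 64.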
     

+ 3 - 13
models/detectors/__init__.py

@@ -9,8 +9,6 @@ from .yolov4.build import build_yolov4
 from .yolov5.build import build_yolov5
 from .yolov7.build import build_yolov7
 from .yolox.build import build_yolox
-from .yolo_free_v1.build import build_yolo_free_v1
-from .yolo_free_v2.build import build_yolo_free_v2
 from .rtdetr.build import build_rtdetr
 
 
@@ -30,11 +28,11 @@ def build_model(args,
         model, criterion = build_yolov2(
             args, model_cfg, device, num_classes, trainable, deploy)
     # YOLOv3   
-    elif args.model in ['yolov3', 'yolov3_t']:
+    elif args.model in ['yolov3', 'yolov3_tiny']:
         model, criterion = build_yolov3(
             args, model_cfg, device, num_classes, trainable, deploy)
     # YOLOv4   
-    elif args.model in ['yolov4', 'yolov4_t']:
+    elif args.model in ['yolov4', 'yolov4_tiny']:
         model, criterion = build_yolov4(
             args, model_cfg, device, num_classes, trainable, deploy)
     # YOLOv5   
@@ -42,21 +40,13 @@ def build_model(args,
         model, criterion = build_yolov5(
             args, model_cfg, device, num_classes, trainable, deploy)
     # YOLOv7
-    elif args.model in ['yolov7_t', 'yolov7_l', 'yolov7_x']:
+    elif args.model in ['yolov7_tiny', 'yolov7', 'yolov7_x']:
         model, criterion = build_yolov7(
             args, model_cfg, device, num_classes, trainable, deploy)
     # YOLOX   
     elif args.model in ['yolox_n', 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']:
         model, criterion = build_yolox(
             args, model_cfg, device, num_classes, trainable, deploy)
-    # FreeYOLOv1
-    elif args.model in ['yolo_free_v1_n', 'yolo_free_v1_s', 'yolo_free_v1_m', 'yolo_free_v1_l', 'yolo_free_v1_x']:
-        model, criterion = build_yolo_free_v1(
-            args, model_cfg, device, num_classes, trainable, deploy)
-    # FreeYOLOv2
-    elif args.model in ['yolo_free_v2_n', 'yolo_free_v2_s', 'yolo_free_v2_m', 'yolo_free_v2_l', 'yolo_free_v1_x']:
-        model, criterion = build_yolo_free_v2(
-            args, model_cfg, device, num_classes, trainable, deploy)
     # RT-DETR
     elif args.model in ['rtdetr_n', 'rtdetr_s', 'rtdetr_m', 'rtdetr_l', 'rtdetr_x']:
         model, criterion = build_rtdetr(

+ 0 - 39
models/detectors/yolo_free_v1/build.py

@@ -1,39 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-import torch
-import torch.nn as nn
-
-from .loss import build_criterion
-from .yolo_free_v1 import FreeYOLOv1
-
-
-# build object detector
-def build_yolo_free_v1(args, cfg, device, num_classes=80, trainable=False, deploy=False):
-    print('==============================')
-    print('Build {} ...'.format(args.model.upper()))
-        
-    # -------------- Build YOLO --------------
-    model = FreeYOLOv1(
-        cfg=cfg,
-        device=device, 
-        num_classes=num_classes,
-        trainable=trainable,
-        conf_thresh=args.conf_thresh,
-        nms_thresh=args.nms_thresh,
-        topk=args.topk,
-        deploy=deploy
-        )
-
-    # -------------- Initialize YOLO --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if trainable:
-        # build criterion for training
-        criterion = build_criterion(cfg, device, num_classes)
-    return model, criterion

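The deleted build function retunes every `nn.BatchNorm2d` to YOLO-style statistics (`eps=1e-3`, `momentum=0.03`). The same loop, as a minimal standalone sketch:

```python
import torch.nn as nn

def retune_batchnorm(model: nn.Module, eps: float = 1e-3, momentum: float = 0.03) -> nn.Module:
    # Walk the module tree and retune every BatchNorm2d in place.
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.eps = eps
            m.momentum = momentum
    return model

net = retune_batchnorm(nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)))
print(net[1].eps, net[1].momentum)  # 0.001 0.03
```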
+ 0 - 163
models/detectors/yolo_free_v1/loss.py

@@ -1,163 +0,0 @@
-import torch
-import torch.nn.functional as F
-from .matcher import AlignedSimOTA
-from utils.box_ops import get_ious
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-
-
-class Criterion(object):
-    def __init__(self, 
-                 cfg, 
-                 device, 
-                 num_classes=80):
-        self.cfg = cfg
-        self.device = device
-        self.num_classes = num_classes
-        # loss weight
-        self.loss_obj_weight = cfg['loss_obj_weight']
-        self.loss_cls_weight = cfg['loss_cls_weight']
-        self.loss_box_weight = cfg['loss_box_weight']
-        # matcher
-        matcher_config = cfg['matcher']
-        self.matcher = AlignedSimOTA(
-            num_classes=num_classes,
-            center_sampling_radius=matcher_config['center_sampling_radius'],
-            topk_candidate=matcher_config['topk_candicate']
-            )
-
-
-    def loss_objectness(self, pred_obj, gt_obj):
-        loss_obj = F.binary_cross_entropy_with_logits(pred_obj, gt_obj, reduction='none')
-
-        return loss_obj
-    
-
-    def loss_classes(self, pred_cls, gt_label):
-        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_label, reduction='none')
-
-        return loss_cls
-
-
-    def loss_bboxes(self, pred_box, gt_box):
-        # regression loss
-        ious = get_ious(pred_box, gt_box, "xyxy", 'giou')
-        loss_box = 1.0 - ious
-
-        return loss_box
-
-
-    def __call__(self, outputs, targets, epoch=0):        
-        """
-            outputs['pred_obj']: List(Tensor) [B, M, 1]
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs = outputs['pred_cls'][0].shape[0]
-        device = outputs['pred_cls'][0].device
-        fpn_strides = outputs['strides']
-        anchors = outputs['anchors']
-        # preds: [B, M, C]
-        obj_preds = torch.cat(outputs['pred_obj'], dim=1)
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-
-        # label assignment
-        cls_targets = []
-        box_targets = []
-        obj_targets = []
-        fg_masks = []
-
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)
-            tgt_bboxes = targets[batch_idx]["boxes"].to(device)
-
-            # check target
-            if len(tgt_labels) == 0 or tgt_bboxes.max().item() == 0.:
-                num_anchors = sum([ab.shape[0] for ab in anchors])
-                # There is no valid gt
-                cls_target = obj_preds.new_zeros((0, self.num_classes))
-                box_target = obj_preds.new_zeros((0, 4))
-                obj_target = obj_preds.new_zeros((num_anchors, 1))
-                fg_mask = obj_preds.new_zeros(num_anchors).bool()
-            else:
-                (
-                    fg_mask,
-                    assigned_labels,
-                    assigned_ious,
-                    assigned_indexs
-                ) = self.matcher(
-                    fpn_strides = fpn_strides,
-                    anchors = anchors,
-                    pred_obj = obj_preds[batch_idx],
-                    pred_cls = cls_preds[batch_idx], 
-                    pred_box = box_preds[batch_idx],
-                    tgt_labels = tgt_labels,
-                    tgt_bboxes = tgt_bboxes
-                    )
-
-                obj_target = fg_mask.unsqueeze(-1)
-                cls_target = F.one_hot(assigned_labels.long(), self.num_classes)
-                cls_target = cls_target * assigned_ious.unsqueeze(-1)
-                box_target = tgt_bboxes[assigned_indexs]
-
-            cls_targets.append(cls_target)
-            box_targets.append(box_target)
-            obj_targets.append(obj_target)
-            fg_masks.append(fg_mask)
-
-        cls_targets = torch.cat(cls_targets, 0)
-        box_targets = torch.cat(box_targets, 0)
-        obj_targets = torch.cat(obj_targets, 0)
-        fg_masks = torch.cat(fg_masks, 0)
-        num_fgs = fg_masks.sum()
-
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # ------------------ objectness loss ------------------
-        loss_obj = self.loss_objectness(obj_preds.view(-1, 1), obj_targets.float())
-        loss_obj = loss_obj.sum() / num_fgs
-        
-        # ------------------ classification loss ------------------
-        cls_preds_pos = cls_preds.view(-1, self.num_classes)[fg_masks]
-        loss_cls = self.loss_classes(cls_preds_pos, cls_targets)
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ regression loss ------------------
-        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
-        loss_box = self.loss_bboxes(box_preds_pos, box_targets)
-        loss_box = loss_box.sum() / num_fgs
-
-        # total loss
-        losses = self.loss_obj_weight * loss_obj + \
-                 self.loss_cls_weight * loss_cls + \
-                 self.loss_box_weight * loss_box
-
-        loss_dict = dict(
-                loss_obj = loss_obj,
-                loss_cls = loss_cls,
-                loss_box = loss_box,
-                losses = losses
-        )
-
-        return loss_dict
-    
-
-def build_criterion(cfg, device, num_classes):
-    criterion = Criterion(
-        cfg=cfg,
-        device=device,
-        num_classes=num_classes
-        )
-
-    return criterion
-
-
-if __name__ == "__main__":
-    pass

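The deleted criterion normalizes each loss term by the foreground count averaged across DDP ranks. A minimal sketch of that normalization using `torch.distributed` directly (the repo routes this through `utils.distributed_utils`, so treat the helper below as an equivalent stand-in):

```python
import torch
import torch.distributed as dist

def normalize_num_fgs(num_fgs: torch.Tensor) -> torch.Tensor:
    # Sum per-rank foreground counts, average over ranks, and clamp at 1
    # so a batch with no positives cannot divide a loss by zero.
    if dist.is_available() and dist.is_initialized():
        dist.all_reduce(num_fgs)
        num_fgs = num_fgs / dist.get_world_size()
    return num_fgs.clamp(min=1.0)

print(normalize_num_fgs(torch.tensor(7.0)))  # tensor(7.) in a single-process run
```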
+ 0 - 188
models/detectors/yolo_free_v1/matcher.py

@@ -1,188 +0,0 @@
-# ---------------------------------------------------------------------
-# Copyright (c) Megvii Inc. All rights reserved.
-# ---------------------------------------------------------------------
-
-
-import torch
-import torch.nn.functional as F
-from utils.box_ops import *
-
-
-class AlignedSimOTA(object):
-    """
-        This code is referenced from https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/models/yolo_head.py
-    """
-    def __init__(self, num_classes, center_sampling_radius, topk_candidate):
-        self.num_classes = num_classes
-        self.center_sampling_radius = center_sampling_radius
-        self.topk_candidate = topk_candidate
-
-
-    @torch.no_grad()
-    def __call__(self, 
-                 fpn_strides, 
-                 anchors, 
-                 pred_obj, 
-                 pred_cls, 
-                 pred_box, 
-                 tgt_labels,
-                 tgt_bboxes):
-        # [M,]
-        strides_tensor = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
-                                for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
-        # List[F, M, 2] -> [M, 2]
-        anchors = torch.cat(anchors, dim=0)
-        num_anchor = anchors.shape[0]        
-        num_gt = len(tgt_labels)
-
-        # ----------------------- Find inside points -----------------------
-        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
-            tgt_bboxes, anchors, strides_tensor, num_anchor, num_gt)
-        obj_preds = pred_obj[fg_mask].float()   # [Mp, 1]
-        cls_preds = pred_cls[fg_mask].float()   # [Mp, C]
-        box_preds = pred_box[fg_mask].float()   # [Mp, 4]
-
-        # ----------------------- Reg cost -----------------------
-        pair_wise_ious, _ = box_iou(tgt_bboxes, box_preds)      # [N, Mp]
-        reg_cost = -torch.log(pair_wise_ious + 1e-8)            # [N, Mp]
-
-        # ----------------------- Cls cost -----------------------
-        with torch.cuda.amp.autocast(enabled=False):
-            # [Mp, C]
-            score_preds = torch.sqrt(obj_preds.sigmoid_() * cls_preds.sigmoid_())
-            # [N, Mp, C]
-            score_preds = score_preds.unsqueeze(0).repeat(num_gt, 1, 1)
-            # prepare cls_target
-            cls_targets = F.one_hot(tgt_labels.long(), self.num_classes).float()
-            cls_targets = cls_targets.unsqueeze(1).repeat(1, score_preds.size(1), 1)
-            cls_targets *= pair_wise_ious.unsqueeze(-1)  # iou-aware
-            # [N, Mp]
-            cls_cost = F.binary_cross_entropy(score_preds, cls_targets, reduction="none").sum(-1)
-        del score_preds
-
-        #----------------------- Dynamic K-Matching -----------------------
-        cost_matrix = (
-            cls_cost
-            + 3.0 * reg_cost
-            + 100000.0 * (~is_in_boxes_and_center)
-        ) # [N, Mp]
-
-        (
-            assigned_labels,         # [num_fg,]
-            assigned_ious,           # [num_fg,]
-            assigned_indexs,         # [num_fg,]
-        ) = self.dynamic_k_matching(
-            cost_matrix,
-            pair_wise_ious,
-            tgt_labels,
-            num_gt,
-            fg_mask
-            )
-        del cls_cost, cost_matrix, pair_wise_ious, reg_cost
-
-        return fg_mask, assigned_labels, assigned_ious, assigned_indexs
-
-
-    def get_in_boxes_info(
-        self,
-        gt_bboxes,   # [N, 4]
-        anchors,     # [M, 2]
-        strides,     # [M,]
-        num_anchors, # M
-        num_gt,      # N
-        ):
-        # anchor center
-        x_centers = anchors[:, 0]
-        y_centers = anchors[:, 1]
-
-        # [M,] -> [1, M] -> [N, M]
-        x_centers = x_centers.unsqueeze(0).repeat(num_gt, 1)
-        y_centers = y_centers.unsqueeze(0).repeat(num_gt, 1)
-
-        # [N,] -> [N, 1] -> [N, M]
-        gt_bboxes_l = gt_bboxes[:, 0].unsqueeze(1).repeat(1, num_anchors) # x1
-        gt_bboxes_t = gt_bboxes[:, 1].unsqueeze(1).repeat(1, num_anchors) # y1
-        gt_bboxes_r = gt_bboxes[:, 2].unsqueeze(1).repeat(1, num_anchors) # x2
-        gt_bboxes_b = gt_bboxes[:, 3].unsqueeze(1).repeat(1, num_anchors) # y2
-
-        b_l = x_centers - gt_bboxes_l
-        b_r = gt_bboxes_r - x_centers
-        b_t = y_centers - gt_bboxes_t
-        b_b = gt_bboxes_b - y_centers
-        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
-
-        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
-        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
-        # in fixed center
-        center_radius = self.center_sampling_radius
-
-        # [N, 2]
-        gt_centers = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) * 0.5
-        
-        # [1, M]
-        center_radius_ = center_radius * strides.unsqueeze(0)
-
-        gt_bboxes_l = gt_centers[:, 0].unsqueeze(1).repeat(1, num_anchors) - center_radius_ # x1
-        gt_bboxes_t = gt_centers[:, 1].unsqueeze(1).repeat(1, num_anchors) - center_radius_ # y1
-        gt_bboxes_r = gt_centers[:, 0].unsqueeze(1).repeat(1, num_anchors) + center_radius_ # x2
-        gt_bboxes_b = gt_centers[:, 1].unsqueeze(1).repeat(1, num_anchors) + center_radius_ # y2
-
-        c_l = x_centers - gt_bboxes_l
-        c_r = gt_bboxes_r - x_centers
-        c_t = y_centers - gt_bboxes_t
-        c_b = gt_bboxes_b - y_centers
-        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
-        is_in_centers = center_deltas.min(dim=-1).values > 0.0
-        is_in_centers_all = is_in_centers.sum(dim=0) > 0
-
-        # in boxes and in centers
-        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
-
-        is_in_boxes_and_center = (
-            is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]
-        )
-        return is_in_boxes_anchor, is_in_boxes_and_center
-    
-    
-    def dynamic_k_matching(
-        self, 
-        cost, 
-        pair_wise_ious, 
-        gt_classes, 
-        num_gt, 
-        fg_mask
-        ):
-        # Dynamic K
-        # ---------------------------------------------------------------
-        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
-
-        ious_in_boxes_matrix = pair_wise_ious
-        n_candidate_k = min(self.topk_candidate, ious_in_boxes_matrix.size(1))
-        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
-        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
-        dynamic_ks = dynamic_ks.tolist()
-        for gt_idx in range(num_gt):
-            _, pos_idx = torch.topk(
-                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False
-            )
-            matching_matrix[gt_idx][pos_idx] = 1
-
-        del topk_ious, dynamic_ks, pos_idx
-
-        anchor_matching_gt = matching_matrix.sum(0)
-        if (anchor_matching_gt > 1).sum() > 0:
-            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
-            matching_matrix[:, anchor_matching_gt > 1] *= 0
-            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
-        fg_mask_inboxes = matching_matrix.sum(0) > 0
-
-        fg_mask[fg_mask.clone()] = fg_mask_inboxes
-
-        assigned_indexs = matching_matrix[:, fg_mask_inboxes].argmax(0)
-        assigned_labels = gt_classes[assigned_indexs]
-
-        assigned_ious = (matching_matrix * pair_wise_ious).sum(0)[
-            fg_mask_inboxes
-        ]
-        return assigned_labels, assigned_ious, assigned_indexs
-    

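The core of the deleted SimOTA matcher is its dynamic-k rule: each GT receives k positives, where k is the truncated sum of its top candidate IoUs, clamped to at least 1. A self-contained sketch of just that step:

```python
import torch

def dynamic_ks(pair_wise_ious: torch.Tensor, topk_candidate: int = 10):
    # pair_wise_ious: [num_gt, num_candidates]; returns one k per GT.
    n_candidate_k = min(topk_candidate, pair_wise_ious.size(1))
    topk_ious, _ = torch.topk(pair_wise_ious, n_candidate_k, dim=1)
    return torch.clamp(topk_ious.sum(1).int(), min=1).tolist()

ious = torch.tensor([[0.90, 0.80, 0.10, 0.05],
                     [0.20, 0.10, 0.05, 0.00]])
print(dynamic_ks(ious))  # [1, 1]: sums 1.85 and 0.35 truncate to 1 and 0, then clamp
```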
+ 0 - 181
models/detectors/yolo_free_v1/yolo_free_v1.py

@@ -1,181 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-# --------------- Model components ---------------
-from .yolo_free_v1_backbone import build_backbone
-from .yolo_free_v1_neck import build_neck
-from .yolo_free_v1_pafpn import build_fpn
-from .yolo_free_v1_head import build_det_head
-from .yolo_free_v1_pred import build_pred_layer
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# My YOLO
-class FreeYOLOv1(nn.Module):
-    def __init__(self, 
-                 cfg,
-                 device, 
-                 num_classes = 20, 
-                 conf_thresh = 0.05,
-                 nms_thresh = 0.6,
-                 trainable = False, 
-                 topk = 1000,
-                 deploy = False):
-        super(FreeYOLOv1, self).__init__()
-        # ---------------------- Basic Parameters ----------------------
-        self.cfg = cfg
-        self.device = device
-        self.stride = cfg['stride']
-        self.num_classes = num_classes
-        self.trainable = trainable
-        self.conf_thresh = conf_thresh
-        self.nms_thresh = nms_thresh
-        self.topk = topk
-        self.deploy = deploy
-        self.head_dim = round(256*cfg['width'])
-        
-        # ---------------------- Network Parameters ----------------------
-        ## ----------- Backbone -----------
-        self.backbone, feats_dim = build_backbone(cfg, trainable&cfg['pretrained'])
-
-        ## ----------- Neck: SPP -----------
-        self.neck = build_neck(cfg=cfg, in_dim=feats_dim[-1], out_dim=feats_dim[-1])
-        feats_dim[-1] = self.neck.out_dim
-        
-        ## ----------- Neck: FPN -----------
-        self.fpn = build_fpn(cfg=cfg, in_dims=feats_dim, out_dim=round(256*cfg['width']))
-        self.fpn_dims = self.fpn.out_dim
-
-        ## ----------- Heads -----------
-        self.det_heads = build_det_head(
-            cfg, self.fpn_dims, self.head_dim, num_classes, num_levels=len(self.stride))
-
-        ## ----------- Preds -----------
-        self.pred_layers = build_pred_layer(
-            self.det_heads.cls_head_dim, self.det_heads.reg_head_dim,
-            self.stride, num_classes, num_coords=4, num_levels=len(self.stride))
-
-
-    ## post-process
-    def post_process(self, obj_preds, cls_preds, box_preds):
-        """
-        Input:
-            obj_preds: List(Tensor) [[H x W, 1], ...]
-            cls_preds: List(Tensor) [[H x W, C], ...]
-            box_preds: List(Tensor) [[H x W, 4], ...]
-            anchors:   List(Tensor) [[H x W, 2], ...]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for obj_pred_i, cls_pred_i, box_pred_i in zip(obj_preds, cls_preds, box_preds):
-            obj_pred_i = obj_pred_i[0]
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            
-            # (H x W x KA x C,)
-            scores_i = (torch.sqrt(obj_pred_i.sigmoid() * cls_pred_i.sigmoid())).flatten()
-
-            # Keep top k top scoring indices only.
-            num_topk = min(self.topk, box_pred_i.size(0))
-
-            # torch.sort is actually faster than .topk (at least on GPUs)
-            predicted_prob, topk_idxs = scores_i.sort(descending=True)
-            topk_scores = predicted_prob[:num_topk]
-            topk_idxs = topk_idxs[:num_topk]
-
-            # filter out the proposals with low confidence score
-            keep_idxs = topk_scores > self.conf_thresh
-            scores = topk_scores[keep_idxs]
-            topk_idxs = topk_idxs[keep_idxs]
-
-            anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-            labels = topk_idxs % self.num_classes
-
-            bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores)
-        labels = torch.cat(all_labels)
-        bboxes = torch.cat(all_bboxes)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes, False)
-
-        return bboxes, scores, labels
-
-
-    # ---------------------- Main Process for Inference ----------------------
-    @torch.no_grad()
-    def inference_single_image(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Heads ----------------
-        cls_feats, reg_feats = self.det_heads(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.pred_layers(cls_feats, reg_feats)
-
-        all_obj_preds = outputs['pred_obj']
-        all_cls_preds = outputs['pred_cls']
-        all_box_preds = outputs['pred_box']
-
-        if self.deploy:
-            obj_preds = torch.cat(all_obj_preds, dim=1)[0]
-            cls_preds = torch.cat(all_cls_preds, dim=1)[0]
-            box_preds = torch.cat(all_box_preds, dim=1)[0]
-            scores = torch.sqrt(obj_preds.sigmoid() * cls_preds.sigmoid())
-            bboxes = box_preds
-            # [n_anchors_all, 4 + C]
-            outputs = torch.cat([bboxes, scores], dim=-1)
-
-            return outputs
-        else:
-            # post process
-            bboxes, scores, labels = self.post_process(
-                all_obj_preds, all_cls_preds, all_box_preds)
-        
-            return bboxes, scores, labels
-
-
-    def forward(self, x):
-        if not self.trainable:
-            return self.inference_single_image(x)
-        else:
-            # ---------------- Backbone ----------------
-            pyramid_feats = self.backbone(x)
-
-            # ---------------- Neck: SPP ----------------
-            pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-            # ---------------- Neck: PaFPN ----------------
-            pyramid_feats = self.fpn(pyramid_feats)
-
-            # ---------------- Heads ----------------
-            cls_feats, reg_feats = self.det_heads(pyramid_feats)
-
-            # ---------------- Preds ----------------
-            outputs = self.pred_layers(cls_feats, reg_feats)
-            
-            return outputs 

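The deleted `post_process` relies on a flatten-then-sort trick: the joint score `sqrt(sigmoid(obj) * sigmoid(cls))` is flattened over anchors x classes, so a single sort recovers both the anchor index and the class label. A minimal demonstration:

```python
import torch

num_classes, num_anchors, topk = 3, 5, 4
obj = torch.randn(num_anchors, 1)
cls = torch.randn(num_anchors, num_classes)
scores = torch.sqrt(obj.sigmoid() * cls.sigmoid()).flatten()  # [A * C]

vals, idxs = scores.sort(descending=True)  # one sort over all (anchor, class) pairs
vals, idxs = vals[:topk], idxs[:topk]
anchor_idxs = torch.div(idxs, num_classes, rounding_mode='floor')
labels = idxs % num_classes
print(anchor_idxs.tolist(), labels.tolist())
```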
+ 0 - 154
models/detectors/yolo_free_v1/yolo_free_v1_backbone.py

@@ -1,154 +0,0 @@
-import torch
-import torch.nn as nn
-try:
-    from .yolo_free_v1_basic import Conv, ELANBlock, DownSample
-except:
-    from yolo_free_v1_basic import Conv, ELANBlock, DownSample
-
-
-
-model_urls = {
-    'elannet_pico': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_pico.pth",
-    'elannet_nano': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_nano.pth",
-    'elannet_small': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_small.pth",
-    'elannet_medium': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_medium.pth",
-    'elannet_large': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_large.pth",
-    'elannet_huge': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_huge.pth",
-}
-
-
-# ---------------------------- Backbones ----------------------------
-# ELANNet-P5
-class ELANNet(nn.Module):
-    def __init__(self, width=1.0, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELANNet, self).__init__()
-        self.feat_dims = [int(512 * width), int(1024 * width), int(1024 * width)]
-        
-        # P1/2
-        self.layer_1 = nn.Sequential(
-            Conv(3, int(64*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            Conv(int(64*width), int(64*width), k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # P2/4
-        self.layer_2 = nn.Sequential(   
-            Conv(int(64*width), int(128*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),             
-            ELANBlock(in_dim=int(128*width), out_dim=int(256*width), expand_ratio=0.5, depth=depth,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # P3/8
-        self.layer_3 = nn.Sequential(
-            DownSample(in_dim=int(256*width), out_dim=int(256*width), act_type=act_type, norm_type=norm_type),             
-            ELANBlock(in_dim=int(256*width), out_dim=int(512*width), expand_ratio=0.5, depth=depth,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # P4/16
-        self.layer_4 = nn.Sequential(
-            DownSample(in_dim=int(512*width), out_dim=int(512*width), act_type=act_type, norm_type=norm_type),             
-            ELANBlock(in_dim=int(512*width), out_dim=int(1024*width), expand_ratio=0.5, depth=depth,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # P5/32
-        self.layer_5 = nn.Sequential(
-            DownSample(in_dim=int(1024*width), out_dim=int(1024*width), act_type=act_type, norm_type=norm_type),             
-            ELANBlock(in_dim=int(1024*width), out_dim=int(1024*width), expand_ratio=0.25, depth=depth,
-                    act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-# ---------------------------- Functions ----------------------------
-## load pretrained weight
-def load_weight(model, model_name):
-    # load weight
-    print('Loading pretrained weight ...')
-    url = model_urls[model_name]
-    if url is not None:
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print(k)
-
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No pretrained for {}'.format(model_name))
-
-    return model
-
-
-## build ELAN-Net
-def build_backbone(cfg, pretrained=False): 
-    # model
-    backbone = ELANNet(
-        width=cfg['width'],
-        depth=cfg['depth'],
-        act_type=cfg['bk_act'],
-        norm_type=cfg['bk_norm'],
-        depthwise=cfg['bk_dpw']
-        )
-    # check whether to load imagenet pretrained weight
-    if pretrained:
-        if cfg['width'] == 0.25 and cfg['depth'] == 0.34 and cfg['bk_dpw']:
-            backbone = load_weight(backbone, model_name='elannet_pico')
-        elif cfg['width'] == 0.25 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_nano')
-        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_small')
-        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
-            backbone = load_weight(backbone, model_name='elannet_medium')
-        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
-            backbone = load_weight(backbone, model_name='elannet_large')
-        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
-            backbone = load_weight(backbone, model_name='elannet_huge')
-    feat_dims = backbone.feat_dims
-
-    return backbone, feat_dims
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': True,
-        'width': 0.25,
-        'depth': 0.34,
-    }
-    model, feats = build_backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

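The deleted `load_weight` keeps only checkpoint tensors whose key and shape both match the model, which lets a backbone pretrained with a different head load cleanly. The same filtering as a standalone sketch:

```python
import torch
import torch.nn as nn

def filter_state_dict(model: nn.Module, ckpt: dict) -> dict:
    # Drop checkpoint entries that are missing from the model or shape-mismatched.
    model_sd = model.state_dict()
    return {k: v for k, v in ckpt.items()
            if k in model_sd and tuple(v.shape) == tuple(model_sd[k].shape)}

m = nn.Linear(4, 2)
ckpt = {'weight': torch.zeros(2, 4), 'bias': torch.zeros(3), 'extra': torch.zeros(1)}
m.load_state_dict(filter_state_dict(m, ckpt), strict=False)  # only 'weight' survives
```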
+ 0 - 191
models/detectors/yolo_free_v1/yolo_free_v1_basic.py

@@ -1,191 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-
-
-# ---------------------------- 2D CNN ----------------------------
-class SiLU(nn.Module):
-    """export-friendly version of nn.SiLU()"""
-
-    @staticmethod
-    def forward(x):
-        return x * torch.sigmoid(x)
-
-
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-
-
-# Basic conv layer
-class Conv(nn.Module):
-    def __init__(self, 
-                 c1,                   # in channels
-                 c2,                   # out channels 
-                 k=1,                  # kernel size 
-                 p=0,                  # padding
-                 s=1,                  # stride
-                 d=1,                  # dilation
-                 act_type='lrelu',     # activation
-                 norm_type='BN',       # normalization
-                 depthwise=False):
-        super(Conv, self).__init__()
-        convs = []
-        add_bias = False if norm_type else True
-        p = p if d == 1 else d
-        if depthwise:
-            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
-            # depthwise conv
-            if norm_type:
-                convs.append(get_norm(norm_type, c1))
-            if act_type:
-                convs.append(get_activation(act_type))
-            # pointwise conv
-            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-
-        else:
-            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-            
-        self.convs = nn.Sequential(*convs)
-
-
-    def forward(self, x):
-        return self.convs(x)
-
-
-# ---------------------------- Modified YOLOv7's Modules ----------------------------
-## ELANBlock
-class ELANBlock(nn.Module):
-    def __init__(self, in_dim, out_dim, expand_ratio=0.5, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELANBlock, self).__init__()
-        if isinstance(expand_ratio, float):
-            inter_dim = int(in_dim * expand_ratio)
-            inter_dim2 = inter_dim
-        elif isinstance(expand_ratio, list):
-            assert len(expand_ratio) == 2
-            e1, e2 = expand_ratio
-            inter_dim = int(in_dim * e1)
-            inter_dim2 = int(inter_dim * e2)
-        # branch-1
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        # branch-2
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        # branch-3
-        for idx in range(round(3*depth)):
-            if idx == 0:
-                cv3 = [Conv(inter_dim, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
-            else:
-                cv3.append(Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-        self.cv3 = nn.Sequential(*cv3)
-        # branch-4
-        self.cv4 = nn.Sequential(*[
-            Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(round(3*depth))
-        ])
-        # output
-        self.out = Conv(inter_dim*2 + inter_dim2*2, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-
-    def forward(self, x):
-        """
-        Input:
-            x: [B, C_in, H, W]
-        Output:
-            out: [B, C_out, H, W]
-        """
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.cv3(x2)
-        x4 = self.cv4(x3)
-
-        # [B, C_in, H, W] -> [B, C_out, H, W]
-        out = self.out(torch.cat([x1, x2, x3, x4], dim=1))
-
-        return out
-
-## DownSample
-class DownSample(nn.Module):
-    def __init__(self, in_dim, out_dim, act_type='silu', norm_type='BN', depthwise=False):
-        super().__init__()
-        inter_dim = out_dim // 2
-        self.mp = nn.MaxPool2d((2, 2), 2)
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        self.cv2 = nn.Sequential(
-            Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type),
-            Conv(inter_dim, inter_dim, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-    def forward(self, x):
-        """
-        Input:
-            x: [B, C, H, W]
-        Output:
-            out: [B, C, H//2, W//2]
-        """
-        # [B, C, H, W] -> [B, C//2, H//2, W//2]
-        x1 = self.cv1(self.mp(x))
-        x2 = self.cv2(x)
-
-        # [B, C, H//2, W//2]
-        out = torch.cat([x1, x2], dim=1)
-
-        return out
-
-
-# ---------------------------- FPN Modules ----------------------------
-## build fpn's core block
-def build_fpn_block(cfg, in_dim, out_dim):
-    if cfg['fpn_core_block'] == 'elanblock':
-        layer = ELANBlock(in_dim=in_dim,
-                          out_dim=out_dim,
-                          expand_ratio=[0.5, 0.5],
-                          depth=cfg['depth'],
-                          act_type=cfg['fpn_act'],
-                          norm_type=cfg['fpn_norm'],
-                          depthwise=cfg['fpn_depthwise']
-                          )
-        
-    return layer
-
-## build fpn's reduce layer
-def build_reduce_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_reduce_layer'] == 'Conv':
-        layer = Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer
-
-## build fpn's downsample layer
-def build_downsample_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_downsample_layer'] == 'Conv':
-        layer = Conv(in_dim, out_dim, k=3, s=2, p=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer

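For the `ELANBlock` above, the output conv sees `2*inter_dim + 2*inter_dim2` channels, since `x1`/`x2` carry `inter_dim` and `x3`/`x4` carry `inter_dim2`. The bookkeeping, as plain arithmetic:

```python
# Channel count of torch.cat([x1, x2, x3, x4]) inside ELANBlock.
def elan_concat_channels(in_dim, expand_ratio):
    if isinstance(expand_ratio, float):
        inter_dim = inter_dim2 = int(in_dim * expand_ratio)
    else:
        e1, e2 = expand_ratio
        inter_dim = int(in_dim * e1)
        inter_dim2 = int(inter_dim * e2)
    return 2 * inter_dim + 2 * inter_dim2

print(elan_concat_channels(128, 0.5))         # 256 = 64*2 + 64*2
print(elan_concat_channels(128, [0.5, 0.5]))  # 192 = 64*2 + 32*2
```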
+ 0 - 115
models/detectors/yolo_free_v1/yolo_free_v1_head.py

@@ -1,115 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .yolo_free_v1_basic import Conv
-
-
-class SingleLevelHead(nn.Module):
-    def __init__(self, in_dim, out_dim, num_classes, num_cls_head, num_reg_head, act_type, norm_type, depthwise):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_classes = num_classes
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.depthwise = depthwise
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = max(out_dim, num_classes)
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    Conv(in_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-            else:
-                cls_feats.append(
-                    Conv(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                        act_type=act_type,
-                        norm_type=norm_type,
-                        depthwise=depthwise)
-                        )      
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = out_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    Conv(in_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-            else:
-                reg_feats.append(
-                    Conv(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-
-    def forward(self, x):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-
-class MultiLevelHead(nn.Module):
-    def __init__(self, cfg, in_dims, out_dim, num_classes=80, num_levels=3):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [SingleLevelHead(
-                in_dims[level],
-                out_dim,
-                num_classes,
-                cfg['num_cls_head'],
-                cfg['num_reg_head'],
-                cfg['head_act'],
-                cfg['head_norm'],
-                cfg['head_depthwise'])
-                for level in range(num_levels)
-            ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.num_classes = num_classes
-
-        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
-        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
-
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        return cls_feats, reg_feats
-    
-
-# build detection head
-def build_det_head(cfg, in_dim, out_dim, num_classes=80, num_levels=3):
-    if cfg['head'] == 'decoupled_head':
-        head = MultiLevelHead(cfg, in_dim, out_dim, num_classes, num_levels) 
-
-    return head

+ 0 - 70
models/detectors/yolo_free_v1/yolo_free_v1_neck.py

@@ -1,70 +0,0 @@
-import torch
-import torch.nn as nn
-from .yolo_free_v1_basic import Conv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code is referenced from https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio=0.5):
-        super().__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.MaxPool2d(kernel_size=cfg['pooling_size'], stride=1, padding=cfg['pooling_size'] // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-
-# SPPF block with CSP module
-class SPPFBlockCSP(nn.Module):
-    """
-        CSP Spatial Pyramid Pooling Block
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio):
-        super(SPPFBlockCSP, self).__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.Sequential(
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise']),
-            SPPF(cfg, inter_dim, inter_dim, expand_ratio=1.0),
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise'])
-        )
-        self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-
-        
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.m(x2)
-        y = self.cv3(torch.cat([x1, x3], dim=1))
-
-        return y
-
-
-def build_neck(cfg, in_dim, out_dim):
-    model = cfg['neck']
-    print('==============================')
-    print('Neck: {}'.format(model))
-    # build neck
-    if model == 'sppf':
-        neck = SPPF(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-    elif model == 'csp_sppf':
-        neck = SPPFBlockCSP(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-
-    return neck
-        

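Why SPPF above applies one 5x5 max-pool three times in sequence: stride-1 max-pooling composes, so the second and third outputs equal 9x9 and 13x13 pools, matching classic SPP's parallel branches at lower cost. A quick numerical check:

```python
import torch
import torch.nn as nn

x = torch.randn(1, 8, 32, 32)
m5 = nn.MaxPool2d(5, stride=1, padding=2)
m9 = nn.MaxPool2d(9, stride=1, padding=4)
m13 = nn.MaxPool2d(13, stride=1, padding=6)

y1 = m5(x); y2 = m5(y1); y3 = m5(y2)
print(torch.equal(y2, m9(x)), torch.equal(y3, m13(x)))  # True True
```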
+ 0 - 94
models/detectors/yolo_free_v1/yolo_free_v1_pafpn.py

@@ -1,94 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .yolo_free_v1_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
-
-
-# YOLO-Style PaFPN
-class YoloPaFPN(nn.Module):
-    def __init__(self, cfg, in_dims=[256, 512, 1024], out_dim=None):
-        super(YoloPaFPN, self).__init__()
-        # --------------------------- Basic Parameters ---------------------------
-        self.in_dims = in_dims
-        c3, c4, c5 = in_dims
-        width = cfg['width']
-
-        # --------------------------- Network Parameters ---------------------------
-        ## top down
-        ### P5 -> P4
-        self.reduce_layer_1 = build_reduce_layer(cfg, c5, round(512*width))
-        self.reduce_layer_2 = build_reduce_layer(cfg, c4, round(512*width))
-        self.top_down_layer_1 = build_fpn_block(cfg, round(512*width) + round(512*width), round(512*width))
-
-        ### P4 -> P3
-        self.reduce_layer_3 = build_reduce_layer(cfg, round(512*width), round(256*width))
-        self.reduce_layer_4 = build_reduce_layer(cfg, c3, round(256*width))
-        self.top_down_layer_2 = build_fpn_block(cfg, round(256*width) + round(256*width), round(256*width))
-
-        ## bottom up
-        ### P3 -> P4
-        self.downsample_layer_1 = build_downsample_layer(cfg, round(256*width), round(256*width))
-        self.bottom_up_layer_1 = build_fpn_block(cfg, round(256*width) + round(256*width), round(512*width))
-
-        ### P4 -> P5
-        self.downsample_layer_2 = build_downsample_layer(cfg, round(512*width), round(512*width))
-        self.bottom_up_layer_2 = build_fpn_block(cfg, round(512*width) + round(512*width), round(1024*width))
-                
-        ## output proj layers
-        if out_dim is not None:
-            self.out_layers = nn.ModuleList([
-                Conv(in_dim, out_dim, k=1,
-                     act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-                     for in_dim in [round(256*width), round(512*width), round(1024*width)]
-                     ])
-            self.out_dim = [out_dim] * 3
-        else:
-            self.out_layers = None
-            self.out_dim = [round(256*width), round(512*width), round(1024*width)]
-
-
-    def forward(self, features):
-        c3, c4, c5 = features
-
-        # Top down
-        ## P5 -> P4
-        c6 = self.reduce_layer_1(c5)
-        c7 = self.reduce_layer_2(c4)
-        c8 = torch.cat([F.interpolate(c6, scale_factor=2.0), c7], dim=1)
-        c9 = self.top_down_layer_1(c8)
-        ## P4 -> P3
-        c10 = self.reduce_layer_3(c9)
-        c11 = self.reduce_layer_4(c3)
-        c12 = torch.cat([F.interpolate(c10, scale_factor=2.0), c11], dim=1)
-        c13 = self.top_down_layer_2(c12)
-
-        # Bottom up
-        # P3 -> P4
-        c14 = self.downsample_layer_1(c13)
-        c15 = torch.cat([c14, c10], dim=1)
-        c16 = self.bottom_up_layer_1(c15)
-        # P4 -> P5
-        c17 = self.downsample_layer_2(c16)
-        c18 = torch.cat([c17, c6], dim=1)
-        c19 = self.bottom_up_layer_2(c18)
-
-        out_feats = [c13, c16, c19] # [P3, P4, P5]
-        
-        # output proj layers
-        if self.out_layers is not None:
-            out_feats_proj = []
-            for feat, layer in zip(out_feats, self.out_layers):
-                out_feats_proj.append(layer(feat))
-            return out_feats_proj
-
-        return out_feats
-
-
-def build_fpn(cfg, in_dims, out_dim=None):
-    model = cfg['fpn']
-    # build pafpn
-    if model == 'yolo_pafpn':
-        fpn_net = YoloPaFPN(cfg, in_dims, out_dim)
-
-    return fpn_net

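One top-down step of the PaFPN above, in shapes: upsample the reduced P5 by 2x (nearest) and concatenate with the reduced P4 along channels before the fusion block. Sizes below are illustrative:

```python
import torch
import torch.nn.functional as F

p5 = torch.randn(1, 256, 20, 20)   # reduced C5
p4 = torch.randn(1, 256, 40, 40)   # reduced C4
fused = torch.cat([F.interpolate(p5, scale_factor=2.0), p4], dim=1)
print(fused.shape)  # torch.Size([1, 512, 40, 40]) -> fed to top_down_layer_1
```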
+ 0 - 143
models/detectors/yolo_free_v1/yolo_free_v1_pred.py

@@ -1,143 +0,0 @@
-import torch
-import torch.nn as nn
-
-
-class SingleLevelPredLayer(nn.Module):
-    def __init__(self, cls_dim, reg_dim, num_classes, num_coords=4):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-
-        # --------- Network Parameters ----------
-        self.obj_pred = nn.Conv2d(reg_dim, 1, kernel_size=1)
-        self.cls_pred = nn.Conv2d(cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(reg_dim, num_coords, kernel_size=1)                
-
-        self.init_bias()
-        
-
-    def init_bias(self):
-        # Init bias
-        init_prob = 0.01
-        bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
-        # obj pred
-        b = self.obj_pred.bias.view(1, -1)
-        b.data.fill_(bias_value.item())
-        self.obj_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # cls pred
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(bias_value.item())
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        w = self.reg_pred.weight
-        w.data.fill_(0.)
-        self.reg_pred.weight = torch.nn.Parameter(w, requires_grad=True)
-
-
-    def forward(self, cls_feat, reg_feat):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        obj_pred = self.obj_pred(reg_feat)
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        return obj_pred, cls_pred, reg_pred
-    
-
-class MultiLevelPredLayer(nn.Module):
-    def __init__(self, cls_dim, reg_dim, strides, num_classes, num_coords=4, num_levels=3):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.strides = strides
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-        self.num_levels = num_levels
-
-        # ----------- Network Parameters -----------
-        self.multi_level_preds = nn.ModuleList(
-            [SingleLevelPredLayer(
-                cls_dim,
-                reg_dim,
-                num_classes,
-                num_coords)
-                for _ in range(num_levels)
-            ])
-
-    def generate_anchors(self, level, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        # generate grid cells
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors += 0.5  # add center offset
-        anchors *= self.strides[level]
-
-        return anchors
-        
-
-    def decode_bbox(self, reg_pred, anchors, stride):
-        ctr_pred = reg_pred[..., :2] * stride + anchors[..., :2]
-        wh_pred = torch.exp(reg_pred[..., 2:]) * stride
-        pred_x1y1 = ctr_pred - wh_pred * 0.5
-        pred_x2y2 = ctr_pred + wh_pred * 0.5
-        box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
-
-        return box_pred
-    
-
-    def forward(self, cls_feats, reg_feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        all_anchors = []
-        all_obj_preds = []
-        all_cls_preds = []
-        all_box_preds = []
-        for level in range(self.num_levels):
-            obj_pred, cls_pred, reg_pred = self.multi_level_preds[level](
-                cls_feats[level], reg_feats[level])
-
-            B, _, H, W = cls_pred.size()
-            fmp_size = [H, W]
-            # generate anchor boxes: [M, 4]
-            anchors = self.generate_anchors(level, fmp_size)
-            anchors = anchors.to(cls_pred.device)
-            
-            # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
-            obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 1)
-            cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-            reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4)
-            box_pred = self.decode_bbox(reg_pred, anchors, self.strides[level])
-
-            all_obj_preds.append(obj_pred)
-            all_cls_preds.append(cls_pred)
-            all_box_preds.append(box_pred)
-            all_anchors.append(anchors)
-
-            # output dict
-            outputs = {"pred_obj": all_obj_preds,        # List(Tensor) [B, M, 1]
-                       "pred_cls": all_cls_preds,        # List(Tensor) [B, M, C]
-                       "pred_box": all_box_preds,        # List(Tensor) [B, M, 4]
-                       "anchors": all_anchors,           # List(Tensor) [B, M, 2]
-                       "strides": self.strides}           # List(Int) [8, 16, 32]
-
-        return outputs
-    
-
-# build detection head
-def build_pred_layer(cls_dim, reg_dim, strides, num_classes, num_coords=4, num_levels=3):
-    pred_layers = MultiLevelPredLayer(cls_dim, reg_dim, strides, num_classes, num_coords, num_levels) 
-
-    return pred_layers

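The deleted prediction layer pairs grid anchors (cell centers scaled by the stride) with a center/size decode: `ctr = reg[:2] * stride + anchor`, `wh = exp(reg[2:]) * stride`. A tiny numeric check of both steps:

```python
import torch

stride, fmp_h, fmp_w = 8, 2, 2
ys, xs = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
anchors = (torch.stack([xs, ys], dim=-1).float().view(-1, 2) + 0.5) * stride

reg = torch.zeros(4, 4)                      # zero offsets, log-size 0 -> wh = stride
ctr = reg[..., :2] * stride + anchors
wh = torch.exp(reg[..., 2:]) * stride
boxes = torch.cat([ctr - wh * 0.5, ctr + wh * 0.5], dim=-1)
print(boxes[0])  # tensor([0., 0., 8., 8.]) for the top-left cell
```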
+ 0 - 39
models/detectors/yolo_free_v2/build.py

@@ -1,39 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-import torch
-import torch.nn as nn
-
-from .loss import build_criterion
-from .yolo_free_v2 import FreeYOLOv2
-
-
-# build object detector
-def build_yolo_free_v2(args, cfg, device, num_classes=80, trainable=False, deploy=False):
-    print('==============================')
-    print('Build {} ...'.format(args.model.upper()))
-        
-    # -------------- Build YOLO --------------
-    model = FreeYOLOv2(
-        cfg=cfg,
-        device=device, 
-        num_classes=num_classes,
-        trainable=trainable,
-        conf_thresh=args.conf_thresh,
-        nms_thresh=args.nms_thresh,
-        topk=args.topk,
-        deploy=deploy
-        )
-
-    # -------------- Initialize YOLO --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if trainable:
-        # build criterion for training
-        criterion = build_criterion(cfg, device, num_classes)
-    return model, criterion

+ 0 - 285
models/detectors/yolo_free_v2/loss.py

@@ -1,285 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from .matcher import TaskAlignedAssigner
-from utils.box_ops import bbox2dist, bbox_iou
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-
-
-class Criterion(object):
-    def __init__(self, cfg, device, num_classes=80):
-        self.cfg = cfg
-        self.device = device
-        self.num_classes = num_classes
-        self.reg_max = cfg['reg_max']
-        self.use_dfl = cfg['reg_max'] > 1
-        # loss
-        self.cls_lossf = ClassificationLoss(cfg, reduction='none')
-        self.reg_lossf = RegressionLoss(num_classes, cfg['reg_max'] - 1, self.use_dfl)
-        # loss weight
-        self.loss_cls_weight = cfg['loss_cls_weight']
-        self.loss_iou_weight = cfg['loss_iou_weight']
-        self.loss_dfl_weight = cfg['loss_dfl_weight']
-        # matcher
-        matcher_config = cfg['matcher']
-        self.matcher = TaskAlignedAssigner(
-            topk=matcher_config['topk'],
-            num_classes=num_classes,
-            alpha=matcher_config['alpha'],
-            beta=matcher_config['beta']
-            )
-
-
-    def __call__(self, outputs, targets):        
-        """
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_reg']: List(Tensor) [B, M, 4*(reg_max+1)]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['anchors']: List(Tensor) [M, 2]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            outputs['stride_tensor']: List(Tensor) [M, 1]
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs = outputs['pred_cls'][0].shape[0]
-        device = outputs['pred_cls'][0].device
-        strides = outputs['stride_tensor']
-        anchors = outputs['anchors']
-        anchors = torch.cat(anchors, dim=0)
-        num_anchors = anchors.shape[0]
-
-        # preds: [B, M, C]
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-        
-        # label assignment
-        gt_label_targets = []
-        gt_score_targets = []
-        gt_bbox_targets = []
-        fg_masks = []
-
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)     # [Mp,]
-            tgt_boxs = targets[batch_idx]["boxes"].to(device)        # [Mp, 4]
-
-            # check target
-            if len(tgt_labels) == 0 or tgt_boxs.max().item() == 0.:
-                # There is no valid gt
-                fg_mask = cls_preds.new_zeros(1, num_anchors).bool()               #[1, M,]
-                gt_label = cls_preds.new_zeros((1, num_anchors,))                  #[1, M,]
-                gt_score = cls_preds.new_zeros((1, num_anchors, self.num_classes)) #[1, M, C]
-                gt_box = cls_preds.new_zeros((1, num_anchors, 4))                  #[1, M, 4]
-            else:
-                tgt_labels = tgt_labels[None, :, None]      # [1, Mp, 1]
-                tgt_boxs = tgt_boxs[None]                   # [1, Mp, 4]
-                (
-                    gt_label,   #[1, M]
-                    gt_box,     #[1, M, 4]
-                    gt_score,   #[1, M, C]
-                    fg_mask,    #[1, M,]
-                    _
-                ) = self.matcher(
-                    pd_scores = cls_preds[batch_idx:batch_idx+1].detach().sigmoid(), 
-                    pd_bboxes = box_preds[batch_idx:batch_idx+1].detach(),
-                    anc_points = anchors,
-                    gt_labels = tgt_labels,
-                    gt_bboxes = tgt_boxs
-                    )
-            gt_label_targets.append(gt_label)
-            gt_score_targets.append(gt_score)
-            gt_bbox_targets.append(gt_box)
-            fg_masks.append(fg_mask)
-
-        # List[B, 1, M, C] -> Tensor[B, M, C] -> Tensor[BM, C]
-        fg_masks = torch.cat(fg_masks, 0).view(-1)                                    # [BM,]
-        gt_label_targets = torch.cat(gt_label_targets, 0).view(-1)                    # [BM,]
-        gt_score_targets = torch.cat(gt_score_targets, 0).view(-1, self.num_classes)  # [BM, C]
-        gt_bbox_targets = torch.cat(gt_bbox_targets, 0).view(-1, 4)                   # [BM, 4]
-        num_fgs = max(gt_score_targets.sum(), 1)
-
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # ------------------ classification loss ------------------
-        cls_preds = cls_preds.view(-1, self.num_classes)
-        gt_label_targets = torch.where(
-            fg_masks > 0,
-            gt_label_targets,
-            torch.full_like(gt_label_targets, self.num_classes)
-            )
-        gt_labels_one_hot = F.one_hot(gt_label_targets.long(), self.num_classes + 1)[..., :-1]
-        loss_cls = self.cls_lossf(cls_preds, gt_score_targets, gt_labels_one_hot)
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ regression loss ------------------
-        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)                           # [BM, 2]
-        strides = torch.cat(strides, dim=0).unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)  # [BM, 1]
-        bbox_weight = gt_score_targets[fg_masks].sum(-1, keepdim=True)                 # [num_fg, 1]
-        reg_preds = reg_preds.view(-1, 4*self.reg_max)                                 # [BM, 4*(reg_max + 1)]
-        box_preds = box_preds.view(-1, 4)                                              # [BM, 4]
-        loss_iou, loss_dfl = self.reg_lossf(
-            pred_regs = reg_preds,
-            pred_boxs = box_preds,
-            anchors = anchors,
-            gt_boxs = gt_bbox_targets,
-            bbox_weight = bbox_weight,
-            fg_masks = fg_masks,
-            strides = strides,
-            )
-        loss_iou = loss_iou.sum() / num_fgs
-        loss_dfl = loss_dfl.sum() / num_fgs
-
-        # total loss
-        losses = loss_cls * self.loss_cls_weight + \
-                 loss_iou * self.loss_iou_weight
-        
-        # loss dict
-        if self.use_dfl:
-            losses += loss_dfl * self.loss_dfl_weight
-            loss_dict = dict(
-                    loss_cls = loss_cls,
-                    loss_iou = loss_iou,
-                    loss_dfl = loss_dfl,
-                    losses = losses
-            )
-        else:
-            loss_dict = dict(
-                    loss_cls = loss_cls,
-                    loss_iou = loss_iou,
-                    losses = losses
-            )
-
-        return loss_dict
-    
-
-class ClassificationLoss(nn.Module):
-    def __init__(self, cfg, reduction='none'):
-        super(ClassificationLoss, self).__init__()
-        self.cfg = cfg
-        self.reduction = reduction
-        # For VFL
-        self.alpha = 0.75
-        self.gamma = 2.0
-
-    def varifocalloss(self, pred_logits, gt_score, gt_label, alpha=0.75, gamma=2.0):
-        focal_weight = alpha * pred_logits.sigmoid().pow(gamma) * (1 - gt_label) + gt_score * gt_label
-        with torch.cuda.amp.autocast(enabled=False):
-            bce_loss = F.binary_cross_entropy_with_logits(
-                pred_logits.float(), gt_score.float(), reduction='none')
-            loss = bce_loss * focal_weight
-
-            if self.reduction == 'sum':
-                loss = loss.sum()
-            elif self.reduction == 'mean':
-                loss = loss.mean()
-
-        return loss
-
-    def binary_cross_entropy(self, pred_logits, gt_score):
-        loss = F.binary_cross_entropy_with_logits(
-            pred_logits.float(), gt_score.float(), reduction='none')
-
-        if self.reduction == 'sum':
-            loss = loss.sum()
-        elif self.reduction == 'mean':
-            loss = loss.mean()
-
-        return loss
-
-
-    def forward(self, pred_logits, gt_score, gt_label):
-        if self.cfg['cls_loss'] == 'bce':
-            return self.binary_cross_entropy(pred_logits, gt_score)
-        elif self.cfg['cls_loss'] == 'vfl':
-            return self.varifocalloss(pred_logits, gt_score, gt_label, self.alpha, self.gamma)
-
-
-class RegressionLoss(nn.Module):
-    def __init__(self, num_classes, reg_max, use_dfl):
-        super(RegressionLoss, self).__init__()
-        self.num_classes = num_classes
-        self.reg_max = reg_max
-        self.use_dfl = use_dfl
-
-
-    def df_loss(self, pred_regs, target):
-        gt_left = target.to(torch.long)
-        gt_right = gt_left + 1
-        weight_left = gt_right.to(torch.float) - target
-        weight_right = 1 - weight_left
-        # loss left
-        loss_left = F.cross_entropy(
-            pred_regs.view(-1, self.reg_max + 1),
-            gt_left.view(-1),
-            reduction='none').view(gt_left.shape) * weight_left
-        # loss right
-        loss_right = F.cross_entropy(
-            pred_regs.view(-1, self.reg_max + 1),
-            gt_right.view(-1),
-            reduction='none').view(gt_left.shape) * weight_right
-
-        loss = (loss_left + loss_right).mean(-1, keepdim=True)
-        
-        return loss
-
-
-    def forward(self, pred_regs, pred_boxs, anchors, gt_boxs, bbox_weight, fg_masks, strides):
-        """
-        Input:
-            pred_regs: (Tensor) [BM, 4*(reg_max + 1)]
-            pred_boxs: (Tensor) [BM, 4]
-            anchors: (Tensor) [BM, 2]
-            gt_boxs: (Tensor) [BM, 4]
-            bbox_weight: (Tensor) [BM, 1]
-            fg_masks: (Tensor) [BM,]
-            strides: (Tensor) [BM, 1]
-        """
-        # select positive samples mask
-        num_pos = fg_masks.sum()
-
-        if num_pos > 0:
-            pred_boxs_pos = pred_boxs[fg_masks]
-            gt_boxs_pos = gt_boxs[fg_masks]
-
-            # iou loss
-            ious = bbox_iou(pred_boxs_pos,
-                            gt_boxs_pos,
-                            xywh=False,
-                            CIoU=True)
-            loss_iou = (1.0 - ious) * bbox_weight
-               
-            # dfl loss
-            if self.use_dfl:
-                pred_regs_pos = pred_regs[fg_masks]
-                gt_boxs_s = gt_boxs / strides
-                anchors_s = anchors / strides
-                gt_ltrb_s = bbox2dist(anchors_s, gt_boxs_s, self.reg_max)
-                gt_ltrb_s_pos = gt_ltrb_s[fg_masks]
-                loss_dfl = self.df_loss(pred_regs_pos, gt_ltrb_s_pos)
-                loss_dfl *= bbox_weight
-            else:
-                loss_dfl = pred_regs.sum() * 0.
-
-        else:
-            loss_iou = pred_regs.sum() * 0.
-            loss_dfl = pred_regs.sum() * 0.
-
-        return loss_iou, loss_dfl
-
-
-def build_criterion(cfg, device, num_classes):
-    criterion = Criterion(
-        cfg=cfg,
-        device=device,
-        num_classes=num_classes
-        )
-
-    return criterion
-
-
-if __name__ == "__main__":
-    pass

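Reviewer note on the deletion above: the removed `ClassificationLoss` implemented Varifocal Loss (the `cls_loss: 'vfl'` branch). For readers tracking what was dropped, a minimal standalone sketch of that weighting, using the deleted defaults `alpha=0.75, gamma=2.0` (the function name is illustrative, not part of the repo):

```python
import torch
import torch.nn.functional as F

def varifocal_loss(pred_logits, gt_score, gt_label, alpha=0.75, gamma=2.0):
    # Negatives are down-weighted by the focal term alpha * p^gamma;
    # positives are weighted by their soft, IoU-aware target score.
    # Shapes: all [N, C]; gt_label is a 0/1 mask, gt_score the soft target.
    pred_prob = pred_logits.sigmoid()
    focal_weight = alpha * pred_prob.pow(gamma) * (1.0 - gt_label) + gt_score * gt_label
    bce = F.binary_cross_entropy_with_logits(pred_logits, gt_score, reduction='none')
    return (bce * focal_weight).sum()

# Toy check: 4 anchors, 3 classes, one positive anchor with IoU-scaled target 0.7.
logits = torch.randn(4, 3)
gt_label = torch.zeros(4, 3)
gt_label[0, 1] = 1.0
gt_score = gt_label * 0.7
print(varifocal_loss(logits, gt_score, gt_label))
```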
+ 0 - 204
models/detectors/yolo_free_v2/matcher.py

@@ -1,204 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from utils.box_ops import bbox_iou
-
-
-# -------------------------- Task Aligned Assigner --------------------------
-class TaskAlignedAssigner(nn.Module):
-    def __init__(self,
-                 topk=10,
-                 num_classes=80,
-                 alpha=0.5,
-                 beta=6.0, 
-                 eps=1e-9):
-        super(TaskAlignedAssigner, self).__init__()
-        self.topk = topk
-        self.num_classes = num_classes
-        self.bg_idx = num_classes
-        self.alpha = alpha
-        self.beta = beta
-        self.eps = eps
-
-    @torch.no_grad()
-    def forward(self,
-                pd_scores,
-                pd_bboxes,
-                anc_points,
-                gt_labels,
-                gt_bboxes):
-        """This code referenced to
-           https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py
-        Args:
-            pd_scores (Tensor): shape(bs, num_total_anchors, num_classes)
-            pd_bboxes (Tensor): shape(bs, num_total_anchors, 4)
-            anc_points (Tensor): shape(num_total_anchors, 2)
-            gt_labels (Tensor): shape(bs, n_max_boxes, 1)
-            gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
-        Returns:
-            target_labels (Tensor): shape(bs, num_total_anchors)
-            target_bboxes (Tensor): shape(bs, num_total_anchors, 4)
-            target_scores (Tensor): shape(bs, num_total_anchors, num_classes)
-            fg_mask (Tensor): shape(bs, num_total_anchors)
-        """
-        self.bs = pd_scores.size(0)
-        self.n_max_boxes = gt_bboxes.size(1)
-
-        mask_pos, align_metric, overlaps = self.get_pos_mask(
-            pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points)
-
-        target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(
-            mask_pos, overlaps, self.n_max_boxes)
-
-        # assigned target
-        target_labels, target_bboxes, target_scores = self.get_targets(
-            gt_labels, gt_bboxes, target_gt_idx, fg_mask)
-
-        # normalize
-        align_metric *= mask_pos
-        pos_align_metrics = align_metric.amax(axis=-1, keepdim=True)  # b, max_num_obj
-        pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True)  # b, max_num_obj
-        norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1)
-        target_scores = target_scores * norm_align_metric
-
-        return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx
-
-
-    def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points):
-        # get anchor_align metric, (b, max_num_obj, h*w)
-        align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes)
-        # get in_gts mask, (b, max_num_obj, h*w)
-        mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
-        # get topk_metric mask, (b, max_num_obj, h*w)
-        mask_topk = self.select_topk_candidates(align_metric * mask_in_gts)
-        # merge all mask to a final mask, (b, max_num_obj, h*w)
-        mask_pos = mask_topk * mask_in_gts
-
-        return mask_pos, align_metric, overlaps
-
-
-    def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes):
-        ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long)  # 2, b, max_num_obj
-        ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes)  # b, max_num_obj
-        ind[1] = gt_labels.long().squeeze(-1)  # b, max_num_obj
-        # get the scores of each grid for each gt cls
-        bbox_scores = pd_scores[ind[0], :, ind[1]]  # b, max_num_obj, h*w
-
-        overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False)
-        overlaps = overlaps.squeeze(3).clamp(0)
-        align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)
-
-        return align_metric, overlaps
-
-
-    def select_topk_candidates(self, metrics, largest=True):
-        """
-        Args:
-            metrics: (b, max_num_obj, h*w).
-            topk_mask: (b, max_num_obj, topk) or None
-        """
-
-        num_anchors = metrics.shape[-1]  # h*w
-        # (b, max_num_obj, topk)
-        topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest)
-        topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).tile([1, 1, self.topk])
-        # (b, max_num_obj, topk)
-        topk_idxs[~topk_mask] = 0
-        # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w)
-        is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2)
-        # filter invalid bboxes
-        is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk)
-        return is_in_topk.to(metrics.dtype)
-
-
-    def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
-        """
-        Args:
-            gt_labels: (b, max_num_obj, 1)
-            gt_bboxes: (b, max_num_obj, 4)
-            target_gt_idx: (b, h*w)
-            fg_mask: (b, h*w)
-        """
-
-        # assigned target labels, (b, 1)
-        batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
-        target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes  # (b, h*w)
-        target_labels = gt_labels.long().flatten()[target_gt_idx]  # (b, h*w)
-
-        # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w)
-        target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx]
-
-        # assigned target scores
-        target_labels.clamp(0)
-        target_scores = F.one_hot(target_labels, self.num_classes)  # (b, h*w, 80)
-        fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes)  # (b, h*w, 80)
-        target_scores = torch.where(fg_scores_mask > 0, target_scores, 0)
-
-        return target_labels, target_bboxes, target_scores
-    
-
-# -------------------------- Basic Functions --------------------------
-def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
-    """select the positive anchors's center in gt
-    Args:
-        xy_centers (Tensor): shape(bs*n_max_boxes, num_total_anchors, 4)
-        gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    n_anchors = xy_centers.size(0)
-    bs, n_max_boxes, _ = gt_bboxes.size()
-    _gt_bboxes = gt_bboxes.reshape([-1, 4])
-    xy_centers = xy_centers.unsqueeze(0).repeat(bs * n_max_boxes, 1, 1)
-    gt_bboxes_lt = _gt_bboxes[:, 0:2].unsqueeze(1).repeat(1, n_anchors, 1)
-    gt_bboxes_rb = _gt_bboxes[:, 2:4].unsqueeze(1).repeat(1, n_anchors, 1)
-    b_lt = xy_centers - gt_bboxes_lt
-    b_rb = gt_bboxes_rb - xy_centers
-    bbox_deltas = torch.cat([b_lt, b_rb], dim=-1)
-    bbox_deltas = bbox_deltas.reshape([bs, n_max_boxes, n_anchors, -1])
-    return (bbox_deltas.min(axis=-1)[0] > eps).to(gt_bboxes.dtype)
-
-
-def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
-    """if an anchor box is assigned to multiple gts,
-        the one with the highest iou will be selected.
-    Args:
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-        overlaps (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    Return:
-        target_gt_idx (Tensor): shape(bs, num_total_anchors)
-        fg_mask (Tensor): shape(bs, num_total_anchors)
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    fg_mask = mask_pos.sum(axis=-2)
-    if fg_mask.max() > 1:
-        mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1])
-        max_overlaps_idx = overlaps.argmax(axis=1)
-        is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes)
-        is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype)
-        mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos)
-        fg_mask = mask_pos.sum(axis=-2)
-    target_gt_idx = mask_pos.argmax(axis=-2)
-    return target_gt_idx, fg_mask , mask_pos
-
-
-def iou_calculator(box1, box2, eps=1e-9):
-    """Calculate iou for batch
-    Args:
-        box1 (Tensor): shape(bs, n_max_boxes, 1, 4)
-        box2 (Tensor): shape(bs, 1, num_total_anchors, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    box1 = box1.unsqueeze(2)  # [N, M1, 4] -> [N, M1, 1, 4]
-    box2 = box2.unsqueeze(1)  # [N, M2, 4] -> [N, 1, M2, 4]
-    px1y1, px2y2 = box1[:, :, :, 0:2], box1[:, :, :, 2:4]
-    gx1y1, gx2y2 = box2[:, :, :, 0:2], box2[:, :, :, 2:4]
-    x1y1 = torch.maximum(px1y1, gx1y1)
-    x2y2 = torch.minimum(px2y2, gx2y2)
-    overlap = (x2y2 - x1y1).clip(0).prod(-1)
-    area1 = (px2y2 - px1y1).clip(0).prod(-1)
-    area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
-    union = area1 + area2 - overlap + eps
-
-    return overlap / union

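The assigner removed here scores every (gt, anchor) pair with the task-aligned metric t = s^alpha * u^beta and keeps the top-k anchors per gt. A minimal sketch of that core rule with the deleted defaults `alpha=0.5, beta=6.0` (names are illustrative):

```python
import torch

def alignment_metric(cls_scores, ious, alpha=0.5, beta=6.0):
    # t = s^alpha * u^beta: an anchor must score well on *both* the gt's
    # class probability (s) and its predicted-box IoU (u) to be selected.
    # Shapes: [B, n_gt, n_anchors].
    return cls_scores.pow(alpha) * ious.pow(beta)

# Toy example: one image, 2 gts, 5 anchors; keep the top-3 anchors per gt.
scores = torch.rand(1, 2, 5)
ious = torch.rand(1, 2, 5)
t = alignment_metric(scores, ious)
print(t.topk(3, dim=-1).indices)
```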
+ 0 - 175
models/detectors/yolo_free_v2/yolo_free_v2.py

@@ -1,175 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-from .yolo_free_v2_backbone import build_backbone
-from .yolo_free_v2_neck import build_neck
-from .yolo_free_v2_pafpn import build_fpn
-from .yolo_free_v2_head import build_det_head
-from .yolo_free_v2_pred import build_pred_layer
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# My YOLO
-class FreeYOLOv2(nn.Module):
-    def __init__(self, 
-                 cfg,
-                 device, 
-                 num_classes = 20, 
-                 conf_thresh = 0.05,
-                 nms_thresh = 0.6,
-                 trainable = False, 
-                 topk = 1000,
-                 deploy = False):
-        super(FreeYOLOv2, self).__init__()
-        # ---------------------- Basic Parameters ----------------------
-        self.cfg = cfg
-        self.device = device
-        self.stride = cfg['stride']
-        self.num_classes = num_classes
-        self.trainable = trainable
-        self.conf_thresh = conf_thresh
-        self.nms_thresh = nms_thresh
-        self.topk = topk
-        self.deploy = deploy
-        self.head_dim = round(256*cfg['width'])
-        
-        # ---------------------- Network Parameters ----------------------
-        ## ----------- Backbone -----------
-        self.backbone, feats_dim = build_backbone(cfg, trainable&cfg['pretrained'])
-
-        ## ----------- Neck: SPP -----------
-        self.neck = build_neck(cfg=cfg, in_dim=feats_dim[-1], out_dim=feats_dim[-1])
-        feats_dim[-1] = self.neck.out_dim
-        
-        ## ----------- Neck: FPN -----------
-        self.fpn = build_fpn(cfg=cfg, in_dims=feats_dim, out_dim=round(256*cfg['width']))
-        self.fpn_dims = self.fpn.out_dim
-
-        ## ----------- Heads -----------
-        self.det_heads = build_det_head(
-            cfg, self.fpn_dims, self.head_dim, num_classes, num_levels=len(self.stride))
-
-        ## ----------- Preds -----------
-        self.pred_layers = build_pred_layer(
-            cfg, self.det_heads.cls_head_dim, self.det_heads.reg_head_dim,
-            self.stride, num_classes, num_levels=len(self.stride))
-
-
-    ## post-process
-    def post_process(self, cls_preds, box_preds):
-        """
-        Input:
-            cls_preds: List(Tensor) [[H x W, C], ...]
-            box_preds: List(Tensor) [[H x W, 4], ...]
-            anchors:   List(Tensor) [[H x W, 2], ...]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            
-            # (H x W x C,)
-            scores_i = cls_pred_i.sigmoid().flatten()
-
-            # Keep top k top scoring indices only.
-            num_topk = min(self.topk, box_pred_i.size(0))
-
-            # torch.sort is actually faster than .topk (at least on GPUs)
-            predicted_prob, topk_idxs = scores_i.sort(descending=True)
-            topk_scores = predicted_prob[:num_topk]
-            topk_idxs = topk_idxs[:num_topk]
-
-            # filter out the proposals with low confidence score
-            keep_idxs = topk_scores > self.conf_thresh
-            scores = topk_scores[keep_idxs]
-            topk_idxs = topk_idxs[keep_idxs]
-
-            anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-            labels = topk_idxs % self.num_classes
-
-            bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores)
-        labels = torch.cat(all_labels)
-        bboxes = torch.cat(all_bboxes)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes, False)
-
-        return bboxes, scores, labels
-
-
-    # ---------------------- Main Process for Inference ----------------------
-    @torch.no_grad()
-    def inference_single_image(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Heads ----------------
-        cls_feats, reg_feats = self.det_heads(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.pred_layers(cls_feats, reg_feats)
-
-        all_cls_preds = outputs['pred_cls']
-        all_box_preds = outputs['pred_box']
-
-        if self.deploy:
-            cls_preds = torch.cat(all_cls_preds, dim=1)[0]
-            box_preds = torch.cat(all_box_preds, dim=1)[0]
-            scores = cls_preds.sigmoid()
-            bboxes = box_preds
-            # [n_anchors_all, 4 + C]
-            outputs = torch.cat([bboxes, scores], dim=-1)
-
-            return outputs
-        else:
-            # post process
-            bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
-        
-            return bboxes, scores, labels
-
-
-    def forward(self, x):
-        if not self.trainable:
-            return self.inference_single_image(x)
-        else:
-            # ---------------- Backbone ----------------
-            pyramid_feats = self.backbone(x)
-
-            # ---------------- Neck: SPP ----------------
-            pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-            # ---------------- Neck: PaFPN ----------------
-            pyramid_feats = self.fpn(pyramid_feats)
-
-            # ---------------- Heads ----------------
-            cls_feats, reg_feats = self.det_heads(pyramid_feats)
-
-            # ---------------- Preds ----------------
-            outputs = self.pred_layers(cls_feats, reg_feats)
-            
-            return outputs 

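`post_process` above recovers (anchor, class) pairs from one flattened, sorted score vector via floor-division and modulo. A self-contained sketch of that trick with made-up sizes:

```python
import torch

num_classes, topk, conf_thresh = 80, 10, 0.05
cls_pred = torch.randn(100, num_classes)   # per-anchor class logits
box_pred = torch.rand(100, 4)              # per-anchor boxes (xyxy)

scores = cls_pred.sigmoid().flatten()      # [100 * 80]
predicted_prob, topk_idxs = scores.sort(descending=True)
topk_scores, topk_idxs = predicted_prob[:topk], topk_idxs[:topk]

keep = topk_scores > conf_thresh           # drop low-confidence proposals
topk_idxs = topk_idxs[keep]
anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')
labels = topk_idxs % num_classes           # flat index -> (anchor, class)
bboxes = box_pred[anchor_idxs]
print(topk_scores[keep].shape, labels.shape, bboxes.shape)
```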
+ 0 - 154
models/detectors/yolo_free_v2/yolo_free_v2_backbone.py

@@ -1,154 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .yolo_free_v2_basic import Conv, ELAN_CSP_Block
-except:
-    from yolo_free_v2_basic import Conv, ELAN_CSP_Block
-
-
-# ---------------------------- ImageNet pretrained weights ----------------------------
-model_urls = {
-    'elan_cspnet_nano': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elan_cspnet_nano.pth",
-    'elan_cspnet_small': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elan_cspnet_small.pth",
-    'elan_cspnet_medium': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elan_cspnet_medium.pth",
-    'elan_cspnet_large': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elan_cspnet_large.pth",
-    'elan_cspnet_huge': None,
-}
-
-
-# ---------------------------- Basic functions ----------------------------
-## ELAN-CSPNet
-class ELAN_CSPNet(nn.Module):
-    def __init__(self, width=1.0, depth=1.0, ratio=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELAN_CSPNet, self).__init__()
-        self.feat_dims = [int(256 * width), int(512 * width), int(512 * width * ratio)]
-        
-        # stride = 2
-        self.layer_1 =  Conv(3, int(64*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type)
-        
-        # stride = 4
-        self.layer_2 = nn.Sequential(
-            Conv(int(64*width), int(128*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            ELAN_CSP_Block(int(128*width), int(128*width), nblocks=int(3*depth), shortcut=True,
-                           act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # stride = 8
-        self.layer_3 = nn.Sequential(
-            Conv(int(128*width), int(256*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            ELAN_CSP_Block(int(256*width), int(256*width), nblocks=int(6*depth), shortcut=True,
-                           act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # stride = 16
-        self.layer_4 = nn.Sequential(
-            Conv(int(256*width), int(512*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            ELAN_CSP_Block(int(512*width), int(512*width), nblocks=int(6*depth), shortcut=True,
-                           act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        # stride = 32
-        self.layer_5 = nn.Sequential(
-            Conv(int(512*width), int(512*width*ratio), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            ELAN_CSP_Block(int(512*width*ratio), int(512*width*ratio), nblocks=int(3*depth), shortcut=True,
-                           act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-# ---------------------------- Functions ----------------------------
-## load pretrained weight
-def load_weight(model, model_name):
-    # load weight
-    print('Loading pretrained weight ...')
-    url = model_urls[model_name]
-    if url is not None:
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print(k)
-
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No pretrained for {}'.format(model_name))
-
-    return model
-
-
-## build ELAN-Net
-def build_backbone(cfg, pretrained=False): 
-    # model
-    backbone = ELAN_CSPNet(
-        width=cfg['width'],
-        depth=cfg['depth'],
-        ratio=cfg['ratio'],
-        act_type=cfg['bk_act'],
-        norm_type=cfg['bk_norm'],
-        depthwise=cfg['bk_dpw']
-        )
-    feat_dims = backbone.feat_dims
-        
-    # check whether to load imagenet pretrained weight
-    if pretrained:
-        if cfg['width'] == 0.25 and cfg['depth'] == 0.34 and cfg['ratio'] == 2.0:
-            backbone = load_weight(backbone, model_name='elan_cspnet_nano')
-        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34 and cfg['ratio'] == 2.0:
-            backbone = load_weight(backbone, model_name='elan_cspnet_small')
-        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67 and cfg['ratio'] == 1.5:
-            backbone = load_weight(backbone, model_name='elan_cspnet_medium')
-        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0 and cfg['ratio'] == 1.0:
-            backbone = load_weight(backbone, model_name='elan_cspnet_large')
-        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34 and cfg['ratio'] == 1.0:
-            backbone = load_weight(backbone, model_name='elan_cspnet_huge')
-
-    return backbone, feat_dims
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_dpw': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'ratio': 1.0,
-    }
-    model, feats = build_backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    x = torch.randn(1, 3, 640, 640)
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

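The width/depth/ratio triplet above is the whole scaling story for this backbone: channel widths scale by `width`, block counts by `depth`, and only the last stage additionally by `ratio`. A toy restatement for the "large" preset:

```python
# Toy restatement of the deleted backbone's scaling rule (values as above).
width, depth, ratio = 1.0, 1.0, 1.0   # the 'large' preset
stage_dims = [int(64 * width), int(128 * width), int(256 * width),
              int(512 * width), int(512 * width * ratio)]
stage_blocks = [int(3 * depth), int(6 * depth), int(6 * depth), int(3 * depth)]
print(stage_dims)     # [64, 128, 256, 512, 512]
print(stage_blocks)   # [3, 6, 6, 3]
```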
+ 0 - 169
models/detectors/yolo_free_v2/yolo_free_v2_basic.py

@@ -1,169 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-
-
-# ---------------------------- 2D CNN ----------------------------
-class SiLU(nn.Module):
-    """export-friendly version of nn.SiLU()"""
-
-    @staticmethod
-    def forward(x):
-        return x * torch.sigmoid(x)
-
-
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-
-
-# Basic conv layer
-class Conv(nn.Module):
-    def __init__(self, 
-                 c1,                   # in channels
-                 c2,                   # out channels 
-                 k=1,                  # kernel size 
-                 p=0,                  # padding
-                 s=1,                  # padding
-                 d=1,                  # dilation
-                 act_type='lrelu',     # activation
-                 norm_type='BN',       # normalization
-                 depthwise=False):
-        super(Conv, self).__init__()
-        convs = []
-        add_bias = False if norm_type else True
-        p = p if d == 1 else d
-        if depthwise:
-            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
-            # depthwise conv
-            if norm_type:
-                convs.append(get_norm(norm_type, c1))
-            if act_type:
-                convs.append(get_activation(act_type))
-            # pointwise conv
-            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-
-        else:
-            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-            
-        self.convs = nn.Sequential(*convs)
-
-
-    def forward(self, x):
-        return self.convs(x)
-
-
-# ---------------------------- YOLOv8's Modules ----------------------------
-# BottleNeck
-class Bottleneck(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expand_ratio=0.5,
-                 shortcut=False,
-                 depthwise=False,
-                 act_type='silu',
-                 norm_type='BN'):
-        super(Bottleneck, self).__init__()
-        inter_dim = int(out_dim * expand_ratio)  # hidden channels            
-        self.cv1 = Conv(in_dim, inter_dim, k=3, p=1, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
-        self.cv2 = Conv(inter_dim, out_dim, k=3, p=1, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
-        self.shortcut = shortcut and in_dim == out_dim
-
-    def forward(self, x):
-        h = self.cv2(self.cv1(x))
-
-        return x + h if self.shortcut else h
-
-
-# ELAN-CSP-Block
-class ELAN_CSP_Block(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expand_ratio=0.5,
-                 nblocks=1,
-                 shortcut=False,
-                 act_type='silu',
-                 norm_type='BN',
-                 depthwise=False):
-        super(ELAN_CSP_Block, self).__init__()
-        inter_dim = int(out_dim * expand_ratio)
-        self.cv1 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
-        self.cv2 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
-        self.m = nn.Sequential(*(
-            Bottleneck(inter_dim, inter_dim, 1.0, shortcut, depthwise, act_type, norm_type)
-            for _ in range(nblocks)))
-        self.cv3 = Conv((2 + nblocks) * inter_dim, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        out = list([x1, x2])
-
-        out.extend(m(out[-1]) for m in self.m)
-
-        out = self.cv3(torch.cat(out, dim=1))
-
-        return out
-
-
-# ---------------------------- FPN Modules ----------------------------
-## build fpn's core block
-def build_fpn_block(cfg, in_dim, out_dim):
-    if cfg['fpn_core_block'] == 'elan_cspblock':
-        layer = ELAN_CSP_Block(in_dim=in_dim,
-                               out_dim=out_dim,
-                               expand_ratio=0.5,
-                               nblocks=round(3*cfg['depth']),
-                               shortcut=False,
-                               act_type=cfg['fpn_act'],
-                               norm_type=cfg['fpn_norm'],
-                               depthwise=cfg['fpn_depthwise']
-                               )
-        
-    return layer
-
-## build fpn's reduce layer
-def build_reduce_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_reduce_layer'] == 'Conv':
-        layer = Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer
-
-## build fpn's downsample layer
-def build_downsample_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_downsample_layer'] == 'Conv':
-        layer = Conv(in_dim, out_dim, k=3, s=2, p=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer

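`ELAN_CSP_Block` concatenates two 1x1 branches plus every intermediate bottleneck output, which is why `cv3` takes `(2 + nblocks) * inter_dim` input channels. A shape-only sketch, with plain convs standing in for the full `Conv`/`Bottleneck` modules:

```python
import torch
import torch.nn as nn

in_dim, out_dim, nblocks = 64, 64, 3
inter_dim = out_dim // 2   # expand_ratio = 0.5
cv1 = nn.Conv2d(in_dim, inter_dim, 1)
cv2 = nn.Conv2d(in_dim, inter_dim, 1)
blocks = nn.ModuleList([nn.Conv2d(inter_dim, inter_dim, 3, padding=1)
                        for _ in range(nblocks)])
cv3 = nn.Conv2d((2 + nblocks) * inter_dim, out_dim, 1)

x = torch.randn(1, in_dim, 16, 16)
outs = [cv1(x), cv2(x)]
for m in blocks:
    outs.append(m(outs[-1]))        # each block feeds on the previous output
y = cv3(torch.cat(outs, dim=1))     # concat all (2 + nblocks) branches
print(y.shape)                      # torch.Size([1, 64, 16, 16])
```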
+ 0 - 111
models/detectors/yolo_free_v2/yolo_free_v2_head.py

@@ -1,111 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .yolo_free_v2_basic import Conv
-
-
-class SingleLevelHead(nn.Module):
-    def __init__(self, cfg, in_dim, out_dim, num_classes):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_classes = num_classes
-        self.num_cls_head = cfg['num_cls_head']
-        self.num_reg_head = cfg['num_reg_head']
-        self.act_type = cfg['head_act']
-        self.norm_type = cfg['head_norm']
-        self.depthwise = cfg['head_depthwise']
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = max(out_dim, num_classes)
-        for i in range(self.num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    Conv(in_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                         act_type=self.act_type,
-                         norm_type=self.norm_type,
-                         depthwise=self.depthwise)
-                        )
-            else:
-                cls_feats.append(
-                    Conv(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
-                        depthwise=self.depthwise)
-                        )      
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = max(out_dim, 4*cfg['reg_max'])
-        for i in range(self.num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    Conv(in_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=self.act_type,
-                         norm_type=self.norm_type,
-                         depthwise=self.depthwise)
-                        )
-            else:
-                reg_feats.append(
-                    Conv(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=self.act_type,
-                         norm_type=self.norm_type,
-                         depthwise=self.depthwise)
-                        )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-
-    def forward(self, x):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-
-class MultiLevelHead(nn.Module):
-    def __init__(self, cfg, in_dims, out_dim, num_classes=80, num_levels=3):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [SingleLevelHead(
-                cfg,
-                in_dims[level],
-                out_dim,
-                num_classes)
-                for level in range(num_levels)
-            ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.num_classes = num_classes
-
-        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
-        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
-
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        return cls_feats, reg_feats
-    
-
-# build detection head
-def build_det_head(cfg, in_dim, out_dim, num_classes=80, num_levels=3):
-    if cfg['head'] == 'decoupled_head':
-        head = MultiLevelHead(cfg, in_dim, out_dim, num_classes, num_levels) 
-
-    return head

+ 0 - 70
models/detectors/yolo_free_v2/yolo_free_v2_neck.py

@@ -1,70 +0,0 @@
-import torch
-import torch.nn as nn
-from .yolo_free_v2_basic import Conv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code referenced to https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio=0.5):
-        super().__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.MaxPool2d(kernel_size=cfg['pooling_size'], stride=1, padding=cfg['pooling_size'] // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-
-# SPPF block with CSP module
-class SPPFBlockCSP(nn.Module):
-    """
-        CSP Spatial Pyramid Pooling Block
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio):
-        super(SPPFBlockCSP, self).__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.Sequential(
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise']),
-            SPPF(cfg, inter_dim, inter_dim, expand_ratio=1.0),
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise'])
-        )
-        self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-
-        
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.m(x2)
-        y = self.cv3(torch.cat([x1, x3], dim=1))
-
-        return y
-
-
-def build_neck(cfg, in_dim, out_dim):
-    model = cfg['neck']
-    print('==============================')
-    print('Neck: {}'.format(model))
-    # build neck
-    if model == 'sppf':
-        neck = SPPF(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-    elif model == 'csp_sppf':
-        neck = SPPFBlockCSP(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-
-    return neck
-        

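Why SPPF instead of classic SPP: chaining one k=5 max-pool three times reproduces parallel 5/9/13 pools exactly (MaxPool2d uses implicit negative-infinity padding, so the cascade composes cleanly) while reusing intermediate results. A quick check:

```python
import torch
import torch.nn as nn

m5 = nn.MaxPool2d(5, stride=1, padding=2)
m9 = nn.MaxPool2d(9, stride=1, padding=4)
m13 = nn.MaxPool2d(13, stride=1, padding=6)

x = torch.randn(1, 8, 32, 32)
y1 = m5(x)
y2 = m5(y1)           # equivalent to 9x9 pooling
y3 = m5(y2)           # equivalent to 13x13 pooling
assert torch.allclose(y2, m9(x)) and torch.allclose(y3, m13(x))
print("SPPF(5,5,5) matches SPP(5,9,13)")
```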
+ 0 - 93
models/detectors/yolo_free_v2/yolo_free_v2_pafpn.py

@@ -1,93 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .yolo_free_v2_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
-
-
-# YOLO-Style PaFPN
-class YoloPaFPN(nn.Module):
-    def __init__(self, cfg, in_dims=[256, 512, 1024], out_dim=None):
-        super(YoloPaFPN, self).__init__()
-        # --------------------------- Basic Parameters ---------------------------
-        self.in_dims = in_dims
-        c3, c4, c5 = in_dims
-        width = cfg['width']
-        ratio = cfg['ratio']
-
-        # --------------------------- Network Parameters ---------------------------
-        ## top dwon
-        ### P5 -> P4
-        self.reduce_layer_1 = build_reduce_layer(cfg, c5, round(512*width))
-        self.top_down_layer_1 = build_fpn_block(cfg, c4 + round(512*width), round(512*width))
-
-        ### P4 -> P3
-        self.reduce_layer_2 = build_reduce_layer(cfg, round(512*width), round(256*width))
-        self.top_down_layer_2 = build_fpn_block(cfg, c3 + round(256*width), round(256*width))
-
-        ## bottom up
-        ### P3 -> P4
-        self.downsample_layer_1 = build_downsample_layer(cfg, round(256*width), round(256*width))
-        self.bottom_up_layer_1 = build_fpn_block(cfg, round(256*width) + round(256*width), round(512*width))
-
-        ### P4 -> P5
-        self.downsample_layer_2 = build_downsample_layer(cfg, round(512*width), round(512*width))
-        self.bottom_up_layer_2 = build_fpn_block(cfg, round(512*width) + round(512*width), round(512*width*ratio))
-                
-        ## output proj layers
-        if out_dim is not None:
-            self.out_layers = nn.ModuleList([
-                Conv(in_dim, out_dim, k=1,
-                     act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-                     for in_dim in [round(256*width), round(512*width), round(512*width*ratio)]
-                     ])
-            self.out_dim = [out_dim] * 3
-        else:
-            self.out_layers = None
-            self.out_dim = [round(256*width), round(512*width), round(512*width*ratio)]
-
-
-    def forward(self, features):
-        c3, c4, c5 = features
-
-        # Top down
-        ## P5 -> P4
-        c6 = self.reduce_layer_1(c5)
-        c7 = F.interpolate(c6, scale_factor=2.0)
-        c8 = torch.cat([c7, c4], dim=1)
-        c9 = self.top_down_layer_1(c8)
-        ## P4 -> P3
-        c10 = self.reduce_layer_2(c9)
-        c11 = F.interpolate(c10, scale_factor=2.0)   # s16->s8
-        c12 = torch.cat([c11, c3], dim=1)
-        c13 = self.top_down_layer_2(c12)  # to det
-
-        # Bottom up
-        # P3 -> P4
-        c14 = self.downsample_layer_1(c13)
-        c15 = torch.cat([c14, c10], dim=1)
-        c16 = self.bottom_up_layer_1(c15)  # to det
-        # P4 -> P5
-        c17 = self.downsample_layer_2(c16)
-        c18 = torch.cat([c17, c6], dim=1)
-        c19 = self.bottom_up_layer_2(c18)  # to det
-
-        out_feats = [c13, c16, c19] # [P3, P4, P5]
-        
-        # output proj layers
-        if self.out_layers is not None:
-            out_feats_proj = []
-            for feat, layer in zip(out_feats, self.out_layers):
-                out_feats_proj.append(layer(feat))
-            return out_feats_proj
-
-        return out_feats
-
-
-def build_fpn(cfg, in_dims, out_dim=None):
-    model = cfg['fpn']
-    # build pafpn
-    if model == 'yolo_pafpn':
-        fpn_net = YoloPaFPN(cfg, in_dims, out_dim)
-
-    return fpn_net

+ 0 - 159
models/detectors/yolo_free_v2/yolo_free_v2_pred.py

@@ -1,159 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class SingleLevelPredLayer(nn.Module):
-    def __init__(self, cfg, cls_dim, reg_dim, num_classes):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cfg = cfg
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.num_classes = num_classes
-
-        # --------- Network Parameters ----------
-        ## pred_conv
-        self.cls_pred = nn.Conv2d(cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(reg_dim, 4*cfg['reg_max'], kernel_size=1)                
-
-        self.init_weight()
-        
-
-    def init_weight(self):
-        # cls pred
-        init_prob = 0.01
-        bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(bias_value.item())
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        w = self.reg_pred.weight
-        w.data.fill_(0.)
-        self.reg_pred.weight = torch.nn.Parameter(w, requires_grad=True)
-
-
-    def forward(self, cls_feat, reg_feat):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        return cls_pred, reg_pred
-    
-
-class MultiLevelPredLayer(nn.Module):
-    def __init__(self, cfg, cls_dim, reg_dim, strides, num_classes, num_levels=3):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cfg = cfg
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.strides = strides
-        self.num_classes = num_classes
-        self.num_levels = num_levels
-        self.reg_max = cfg['reg_max']
-
-        # ----------- Network Parameters -----------
-        ## proj_conv
-        self.proj = nn.Parameter(torch.linspace(0, cfg['reg_max'], cfg['reg_max']), requires_grad=False)
-        self.proj_conv = nn.Conv2d(cfg['reg_max'], 1, kernel_size=1, bias=False)
-        self.proj_conv.weight = nn.Parameter(self.proj.view([1, cfg['reg_max'], 1, 1]).clone().detach(), requires_grad=False)
-        ## pred layers
-        self.multi_level_preds = nn.ModuleList(
-            [SingleLevelPredLayer(
-                cfg,
-                cls_dim,
-                reg_dim,
-                num_classes)
-                for _ in range(num_levels)
-            ])
-
-    def generate_anchors(self, level, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        # generate grid cells
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors += 0.5  # add center offset
-        anchors *= self.strides[level]
-
-        return anchors
-        
-
-    def decode_bbox(self, reg_pred, anchors, stride):
-        # ----------------------- Decode bbox -----------------------
-        B, M = reg_pred.shape[:2]
-        # [B, M, 4*(reg_max)] -> [B, M, 4, reg_max] -> [B, 4, M, reg_max]
-        reg_pred = reg_pred.reshape([B, M, 4, self.reg_max])
-        # [B, M, 4, reg_max] -> [B, reg_max, 4, M]
-        reg_pred = reg_pred.permute(0, 3, 2, 1).contiguous()
-        # [B, reg_max, 4, M] -> [B, 1, 4, M]
-        reg_pred = self.proj_conv(F.softmax(reg_pred, dim=1))
-        # [B, 1, 4, M] -> [B, 4, M] -> [B, M, 4]
-        reg_pred = reg_pred.view(B, 4, M).permute(0, 2, 1).contiguous()
-        ## tlbr -> xyxy
-        x1y1_pred = anchors[None] - reg_pred[..., :2] * stride
-        x2y2_pred = anchors[None] + reg_pred[..., 2:] * stride
-        box_pred = torch.cat([x1y1_pred, x2y2_pred], dim=-1)
-
-        return box_pred
-    
-
-    def forward(self, cls_feats, reg_feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        all_anchors = []
-        all_strides = []
-        all_cls_preds = []
-        all_reg_preds = []
-        all_box_preds = []
-        for level in range(self.num_levels):
-            cls_pred, reg_pred = self.multi_level_preds[level](cls_feats[level], reg_feats[level])
-
-            B, _, H, W = cls_pred.size()
-            fmp_size = [H, W]
-            # generate anchor boxes: [M, 4]
-            anchors = self.generate_anchors(level, fmp_size)
-            anchors = anchors.to(cls_pred.device)
-
-            # stride tensor: [M, 1]
-            stride_tensor = torch.ones_like(anchors[..., :1]) * self.strides[level]
-
-            # process preds
-            cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-            reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4*self.reg_max)
-            box_pred = self.decode_bbox(reg_pred, anchors, self.strides[level])
-
-            # collect preds
-            all_cls_preds.append(cls_pred)
-            all_reg_preds.append(reg_pred)
-            all_box_preds.append(box_pred)
-            all_anchors.append(anchors)
-            all_strides.append(stride_tensor)
-
-            # output dict
-            outputs = {"pred_cls": all_cls_preds,        # List(Tensor) [B, M, C]
-                       "pred_reg": all_reg_preds,        # List(Tensor) [B, M, 4*(reg_max)]
-                       "pred_box": all_box_preds,        # List(Tensor) [B, M, 4]
-                       "anchors": all_anchors,           # List(Tensor) [M, 2]
-                       "strides": self.strides,           # List(Int) = [8, 16, 32]
-                       "stride_tensor": all_strides      # List(Tensor) [M, 1]
-                       }
-            
-            return outputs 
-    
-
-# build detection head
-def build_pred_layer(cfg, cls_dim, reg_dim, strides, num_classes, num_levels=3):
-    pred_layers = MultiLevelPredLayer(cfg, cls_dim, reg_dim, strides, num_classes, num_levels) 
-
-    return pred_layers

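`decode_bbox` above is DFL-style decoding: each box side is predicted as a categorical distribution over `reg_max` bins and reduced to a scalar offset by taking its expectation (the frozen `proj_conv` computes exactly that matrix product). A generic sketch using the common `arange(reg_max)` projection; note the deleted file wires `reg_max` into its channel counts slightly inconsistently, so this follows the convention rather than that file verbatim:

```python
import torch
import torch.nn.functional as F

def dfl_decode(reg_pred, reg_max=16):
    # reg_pred: [M, 4 * reg_max] logits. Softmax over the bins, then take
    # the expectation E[bin] as the continuous l/t/r/b offset in grid units.
    M = reg_pred.shape[0]
    proj = torch.arange(reg_max, dtype=torch.float32)        # bin positions
    dist = F.softmax(reg_pred.view(M, 4, reg_max), dim=-1)   # [M, 4, reg_max]
    return (dist * proj).sum(-1)                             # [M, 4]

# Offsets become corners exactly as in decode_bbox:
# x1y1 = anchor - ltrb[:, :2] * stride, x2y2 = anchor + ltrb[:, 2:] * stride.
anchors, stride = torch.tensor([[8.0, 8.0]]), 8
ltrb = dfl_decode(torch.randn(1, 64))
box = torch.cat([anchors - ltrb[:, :2] * stride,
                 anchors + ltrb[:, 2:] * stride], dim=-1)
print(box)
```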
+ 10 - 15
models/detectors/yolov7/loss.py

@@ -1,5 +1,4 @@
 import torch
-import torch.nn as nn
 import torch.nn.functional as F
 from .matcher import SimOTA
 from utils.box_ops import get_ious
@@ -42,10 +41,7 @@ class Criterion(object):
 
     def loss_bboxes(self, pred_box, gt_box):
         # regression loss
-        ious = get_ious(pred_box,
-                        gt_box,
-                        box_mode="xyxy",
-                        iou_type='giou')
+        ious = get_ious(pred_box, gt_box, "xyxy", 'giou')
         loss_box = 1.0 - ious
 
         return loss_box
@@ -90,11 +86,10 @@ class Criterion(object):
                 fg_mask = obj_preds.new_zeros(num_anchors).bool()
             else:
                 (
-                    gt_matched_classes,
                     fg_mask,
-                    pred_ious_this_matching,
-                    matched_gt_inds,
-                    num_fg_img,
+                    assigned_labels,
+                    assigned_ious,
+                    assigned_indexs
                 ) = self.matcher(
                     fpn_strides = fpn_strides,
                     anchors = anchors,
@@ -106,9 +101,9 @@ class Criterion(object):
                     )
 
                 obj_target = fg_mask.unsqueeze(-1)
-                cls_target = F.one_hot(gt_matched_classes.long(), self.num_classes)
-                cls_target = cls_target * pred_ious_this_matching.unsqueeze(-1)
-                box_target = tgt_bboxes[matched_gt_inds]
+                cls_target = F.one_hot(assigned_labels.long(), self.num_classes)
+                cls_target = cls_target * assigned_ious.unsqueeze(-1)
+                box_target = tgt_bboxes[assigned_indexs]

 
             cls_targets.append(cls_target)
             box_targets.append(box_target)
@@ -125,16 +120,16 @@ class Criterion(object):
             torch.distributed.all_reduce(num_fgs)
         num_fgs = (num_fgs / get_world_size()).clamp(1.0)
 
-        # obj loss
+        # ------------------ objectness loss ------------------
         loss_obj = self.loss_objectness(obj_preds.view(-1, 1), obj_targets.float())
         loss_obj = loss_obj.sum() / num_fgs
         
-        # cls loss
+        # ------------------ classification loss ------------------
         cls_preds_pos = cls_preds.view(-1, self.num_classes)[fg_masks]
         loss_cls = self.loss_classes(cls_preds_pos, cls_targets)
         loss_cls = loss_cls.sum() / num_fgs
 
-        # regression loss
+        # ------------------ regression loss ------------------
         box_preds_pos = box_preds.view(-1, 4)[fg_masks]
         loss_box = self.loss_bboxes(box_preds_pos, box_targets)
         loss_box = loss_box.sum() / num_fgs

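The `num_fgs` block kept above (all_reduce, divide by world size, clamp) keeps the loss denominator consistent across DDP ranks and never zero. A minimal sketch of that normalization, with an illustrative function name:

```python
import torch
import torch.distributed as dist

def normalize_num_fgs(num_fgs):
    # Average the positive-sample count over all DDP ranks, then clamp to
    # at least 1 so the per-loss divisions can never hit zero.
    if dist.is_available() and dist.is_initialized():
        dist.all_reduce(num_fgs)
        num_fgs = num_fgs / dist.get_world_size()
    return num_fgs.clamp(min=1.0)

print(normalize_num_fgs(torch.tensor(0.0)))   # tensor(1.) on a single process
```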
+ 39 - 58
models/detectors/yolov7/matcher.py

@@ -8,7 +8,6 @@ import torch.nn.functional as F
 from utils.box_ops import *
 
 
-# YOLOX SimOTA
 class SimOTA(object):
     """
         This code referenced to https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/models/yolo_head.py
@@ -29,76 +28,59 @@ class SimOTA(object):
                  tgt_labels,
                  tgt_bboxes):
         # [M,]
-        strides = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
+        strides_tensor = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
                                 for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
         # List[F, M, 2] -> [M, 2]
         anchors = torch.cat(anchors, dim=0)
         num_anchor = anchors.shape[0]        
         num_gt = len(tgt_labels)
 
-        fg_mask, is_in_boxes_and_center = \
-            self.get_in_boxes_info(
-                tgt_bboxes,
-                anchors,
-                strides,
-                num_anchor,
-                num_gt
-                )
-
-        obj_preds_ = pred_obj[fg_mask]   # [Mp, 1]
-        cls_preds_ = pred_cls[fg_mask]   # [Mp, C]
-        box_preds_ = pred_box[fg_mask]   # [Mp, 4]
-        num_in_boxes_anchor = box_preds_.shape[0]
-
-        # [N, Mp]
-        pair_wise_ious, _ = box_iou(tgt_bboxes, box_preds_)
-        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
-
-        # [N, C] -> [N, Mp, C]
-        gt_cls = (
-            F.one_hot(tgt_labels.long(), self.num_classes)
-            .float()
-            .unsqueeze(1)
-            .repeat(1, num_in_boxes_anchor, 1)
-        )
+        # ----------------------- Find inside points -----------------------
+        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
+            tgt_bboxes, anchors, strides_tensor, num_anchor, num_gt)
+        obj_preds = pred_obj[fg_mask].float()   # [Mp, 1]
+        cls_preds = pred_cls[fg_mask].float()   # [Mp, C]
+        box_preds = pred_box[fg_mask].float()   # [Mp, 4]
+
+        # ----------------------- Reg cost -----------------------
+        pair_wise_ious, _ = box_iou(tgt_bboxes, box_preds)      # [N, Mp]
+        reg_cost = -torch.log(pair_wise_ious + 1e-8)            # [N, Mp]
 
+        # ----------------------- Cls cost -----------------------
         with torch.cuda.amp.autocast(enabled=False):
-            score_preds_ = torch.sqrt(
-                cls_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
-                * obj_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
-            ) # [N, Mp, C]
-            pair_wise_cls_loss = F.binary_cross_entropy(
-                score_preds_, gt_cls, reduction="none"
-            ).sum(-1) # [N, Mp]
-        del score_preds_
-
-        cost = (
-            pair_wise_cls_loss
-            + 3.0 * pair_wise_ious_loss
+            # [Mp, C]
+            score_preds = torch.sqrt(obj_preds.sigmoid_() * cls_preds.sigmoid_())
+            # [N, Mp, C]
+            score_preds = score_preds.unsqueeze(0).repeat(num_gt, 1, 1)
+            # prepare cls_target
+            cls_targets = F.one_hot(tgt_labels.long(), self.num_classes).float()
+            cls_targets = cls_targets.unsqueeze(1).repeat(1, score_preds.size(1), 1)
+            cls_targets *= pair_wise_ious.unsqueeze(-1)  # iou-aware
+            # [N, Mp]
+            cls_cost = F.binary_cross_entropy(score_preds, cls_targets, reduction="none").sum(-1)
+        del score_preds
+
+        # ----------------------- Dynamic K-Matching -----------------------
+        cost_matrix = (
+            cls_cost
+            + 3.0 * reg_cost
             + 100000.0 * (~is_in_boxes_and_center)
         ) # [N, Mp]
 
         (
-            num_fg,
-            gt_matched_classes,         # [num_fg,]
-            pred_ious_this_matching,    # [num_fg,]
-            matched_gt_inds,            # [num_fg,]
+            assigned_labels,         # [num_fg,]
+            assigned_ious,           # [num_fg,]
+            assigned_indexs,         # [num_fg,]
         ) = self.dynamic_k_matching(
-            cost,
+            cost_matrix,
             pair_wise_ious,
             tgt_labels,
             num_gt,
             fg_mask
             )
-        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
-
-        return (
-                gt_matched_classes,
-                fg_mask,
-                pred_ious_this_matching,
-                matched_gt_inds,
-                num_fg,
-        )
+        del cls_cost, cost_matrix, pair_wise_ious, reg_cost
+
+        return fg_mask, assigned_labels, assigned_ious, assigned_indexs
 
 
     def get_in_boxes_info(
@@ -193,15 +175,14 @@ class SimOTA(object):
             matching_matrix[:, anchor_matching_gt > 1] *= 0
             matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
         fg_mask_inboxes = matching_matrix.sum(0) > 0
-        num_fg = fg_mask_inboxes.sum().item()
 
         fg_mask[fg_mask.clone()] = fg_mask_inboxes
 
-        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
-        gt_matched_classes = gt_classes[matched_gt_inds]
+        assigned_indexs = matching_matrix[:, fg_mask_inboxes].argmax(0)
+        assigned_labels = gt_classes[assigned_indexs]
 
-        pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[
+        assigned_ious = (matching_matrix * pair_wise_ious).sum(0)[
             fg_mask_inboxes
         ]
-        return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
+        return assigned_labels, assigned_ious, assigned_indexs
     

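Note: the hunk above is the tail of the SimOTA refactor in `models/detectors/yolov7/matcher.py`; the identical change is applied to the YOLOX copy below. The matcher builds a `[N, Mp]` cost matrix (the IoU-aware classification cost plus `3.0 *` the `-log(IoU)` regression cost, with a huge penalty for anchors outside boxes and centers) and resolves it with dynamic k-matching. A minimal sketch of that step, assuming it follows the standard YOLOX recipe (per-GT `k` = clamped sum of the top-10 candidate IoUs); the conflict-resolution block at the end is taken from the hunk itself:

```python
import torch

def dynamic_k_matching_sketch(cost, pair_wise_ious, num_gt):
    # cost, pair_wise_ious: [N, Mp] -- N ground truths x Mp candidate anchors
    matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
    n_candidate_k = min(10, pair_wise_ious.size(1))
    topk_ious, _ = torch.topk(pair_wise_ious, n_candidate_k, dim=1)
    dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)  # per-GT k
    for gt_idx in range(num_gt):
        # each GT claims its k cheapest candidate anchors
        _, pos_idx = torch.topk(cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)
        matching_matrix[gt_idx][pos_idx] = 1
    # an anchor claimed by several GTs keeps only its cheapest assignment
    anchor_matching_gt = matching_matrix.sum(0)
    if (anchor_matching_gt > 1).sum() > 0:
        _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
        matching_matrix[:, anchor_matching_gt > 1] *= 0
        matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
    return matching_matrix
```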
+ 24 - 2
models/detectors/yolox/build.py

@@ -21,11 +21,11 @@ def build_yolox(args, cfg, device, num_classes=80, trainable=False, deploy=False
         cfg=cfg,
         device=device, 
         num_classes=num_classes,
-        trainable=trainable,
         conf_thresh=args.conf_thresh,
         nms_thresh=args.nms_thresh,
         topk=args.topk,
-        deploy=deploy
+        trainable = trainable,
+        deploy = deploy
         )
 
     # -------------- Initialize YOLO --------------
@@ -33,6 +33,28 @@ def build_yolox(args, cfg, device, num_classes=80, trainable=False, deploy=False
         if isinstance(m, nn.BatchNorm2d):
             m.eps = 1e-3
             m.momentum = 0.03    
+    # Init bias
+    init_prob = 0.01
+    bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
+    # obj pred
+    for obj_pred in model.obj_preds:
+        b = obj_pred.bias.view(1, -1)
+        b.data.fill_(bias_value.item())
+        obj_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+    # cls pred
+    for cls_pred in model.cls_preds:
+        b = cls_pred.bias.view(1, -1)
+        b.data.fill_(bias_value.item())
+        cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+    # reg pred
+    for reg_pred in model.reg_preds:
+        b = reg_pred.bias.view(-1)
+        b.data.fill_(1.0)
+        reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+        w = reg_pred.weight
+        w.data.fill_(0.)
+        reg_pred.weight = torch.nn.Parameter(w, requires_grad=True)
+
 
     # -------------- Build criterion --------------
     criterion = None

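The new bias initialization in `build.py` is the focal-loss "prior probability" trick: with `init_prob = 0.01`, the obj and cls logits start at `-log((1 - p) / p) ≈ -4.595`, so their sigmoids initially output ≈0.01 and the losses are not swamped by the overwhelming number of easy negatives in the first iterations. A quick check of the arithmetic:

```python
import math

init_prob = 0.01
bias_value = -math.log((1.0 - init_prob) / init_prob)
print(bias_value)                            # -4.595...
print(1.0 / (1.0 + math.exp(-bias_value)))   # sigmoid(bias) = 0.01
```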
+ 10 - 15
models/detectors/yolox/loss.py

@@ -1,5 +1,4 @@
 import torch
-import torch.nn as nn
 import torch.nn.functional as F
 from .matcher import SimOTA
 from utils.box_ops import get_ious
@@ -42,10 +41,7 @@ class Criterion(object):
 
     def loss_bboxes(self, pred_box, gt_box):
         # regression loss
-        ious = get_ious(pred_box,
-                        gt_box,
-                        box_mode="xyxy",
-                        iou_type='giou')
+        ious = get_ious(pred_box, gt_box, 'xyxy', 'giou')
         loss_box = 1.0 - ious
 
         return loss_box
@@ -90,11 +86,10 @@ class Criterion(object):
                 fg_mask = obj_preds.new_zeros(num_anchors).bool()
             else:
                 (
-                    gt_matched_classes,
                     fg_mask,
-                    pred_ious_this_matching,
-                    matched_gt_inds,
-                    num_fg_img,
+                    assigned_labels,
+                    assigned_ious,
+                    assigned_indexs
                 ) = self.matcher(
                     fpn_strides = fpn_strides,
                     anchors = anchors,
@@ -106,9 +101,9 @@ class Criterion(object):
                     )
 
                 obj_target = fg_mask.unsqueeze(-1)
-                cls_target = F.one_hot(gt_matched_classes.long(), self.num_classes)
-                cls_target = cls_target * pred_ious_this_matching.unsqueeze(-1)
-                box_target = tgt_bboxes[matched_gt_inds]
+                cls_target = F.one_hot(assigned_labels.long(), self.num_classes)
+                cls_target = cls_target * assigned_ious.unsqueeze(-1)
+                box_target = tgt_bboxes[assigned_indexs]
 
             cls_targets.append(cls_target)
             box_targets.append(box_target)
@@ -125,16 +120,16 @@ class Criterion(object):
             torch.distributed.all_reduce(num_fgs)
         num_fgs = (num_fgs / get_world_size()).clamp(1.0)
 
-        # obj loss
+        # ------------------ objectness loss ------------------
         loss_obj = self.loss_objectness(obj_preds.view(-1, 1), obj_targets.float())
         loss_obj = loss_obj.sum() / num_fgs
         
-        # cls loss
+        # ------------------ classification loss ------------------
         cls_preds_pos = cls_preds.view(-1, self.num_classes)[fg_masks]
         loss_cls = self.loss_classes(cls_preds_pos, cls_targets)
         loss_cls = loss_cls.sum() / num_fgs
 
-        # regression loss
+        # ------------------ regression loss ------------------
         box_preds_pos = box_preds.view(-1, 4)[fg_masks]
         loss_box = self.loss_bboxes(box_preds_pos, box_targets)
         loss_box = loss_box.sum() / num_fgs

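All three losses are normalized by `num_fgs`, the foreground count averaged over DDP ranks, so every process divides by the same denominator regardless of how positives are distributed across GPUs; the `clamp(1.0)` guards against batches with no targets. A minimal sketch of that normalization, assuming the surrounding helpers wrap their `torch.distributed` counterparts:

```python
import torch
import torch.distributed as dist

def normalized_num_fgs(fg_masks: torch.Tensor) -> torch.Tensor:
    # fg_masks: flattened bool mask of positive anchors on this rank
    num_fgs = fg_masks.sum().float()
    if dist.is_available() and dist.is_initialized():
        dist.all_reduce(num_fgs)                   # total positives over all ranks
        num_fgs = num_fgs / dist.get_world_size()  # mean positives per rank
    return num_fgs.clamp(1.0)                      # never divide by zero
```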
+ 39 - 58
models/detectors/yolox/matcher.py

@@ -8,7 +8,6 @@ import torch.nn.functional as F
 from utils.box_ops import *
 
 
-# YOLOX SimOTA
 class SimOTA(object):
     """
         This code is adapted from https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/models/yolo_head.py
@@ -29,76 +28,59 @@ class SimOTA(object):
                  tgt_labels,
                  tgt_bboxes):
         # [M,]
-        strides = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
+        strides_tensor = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
                                 for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
         # List[F, M, 2] -> [M, 2]
         anchors = torch.cat(anchors, dim=0)
         num_anchor = anchors.shape[0]        
         num_gt = len(tgt_labels)
 
-        fg_mask, is_in_boxes_and_center = \
-            self.get_in_boxes_info(
-                tgt_bboxes,
-                anchors,
-                strides,
-                num_anchor,
-                num_gt
-                )
-
-        obj_preds_ = pred_obj[fg_mask]   # [Mp, 1]
-        cls_preds_ = pred_cls[fg_mask]   # [Mp, C]
-        box_preds_ = pred_box[fg_mask]   # [Mp, 4]
-        num_in_boxes_anchor = box_preds_.shape[0]
-
-        # [N, Mp]
-        pair_wise_ious, _ = box_iou(tgt_bboxes, box_preds_)
-        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
-
-        # [N, C] -> [N, Mp, C]
-        gt_cls = (
-            F.one_hot(tgt_labels.long(), self.num_classes)
-            .float()
-            .unsqueeze(1)
-            .repeat(1, num_in_boxes_anchor, 1)
-        )
+        # ----------------------- Find inside points -----------------------
+        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
+            tgt_bboxes, anchors, strides_tensor, num_anchor, num_gt)
+        obj_preds = pred_obj[fg_mask].float()   # [Mp, 1]
+        cls_preds = pred_cls[fg_mask].float()   # [Mp, C]
+        box_preds = pred_box[fg_mask].float()   # [Mp, 4]
+
+        # ----------------------- Reg cost -----------------------
+        pair_wise_ious, _ = box_iou(tgt_bboxes, box_preds)      # [N, Mp]
+        reg_cost = -torch.log(pair_wise_ious + 1e-8)            # [N, Mp]
 
+        # ----------------------- Cls cost -----------------------
         with torch.cuda.amp.autocast(enabled=False):
-            score_preds_ = torch.sqrt(
-                cls_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
-                * obj_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
-            ) # [N, Mp, C]
-            pair_wise_cls_loss = F.binary_cross_entropy(
-                score_preds_, gt_cls, reduction="none"
-            ).sum(-1) # [N, Mp]
-        del score_preds_
-
-        cost = (
-            pair_wise_cls_loss
-            + 3.0 * pair_wise_ious_loss
+            # [Mp, C]
+            score_preds = torch.sqrt(obj_preds.sigmoid_() * cls_preds.sigmoid_())
+            # [N, Mp, C]
+            score_preds = score_preds.unsqueeze(0).repeat(num_gt, 1, 1)
+            # prepare cls_target
+            cls_targets = F.one_hot(tgt_labels.long(), self.num_classes).float()
+            cls_targets = cls_targets.unsqueeze(1).repeat(1, score_preds.size(1), 1)
+            cls_targets *= pair_wise_ious.unsqueeze(-1)  # iou-aware
+            # [N, Mp]
+            cls_cost = F.binary_cross_entropy(score_preds, cls_targets, reduction="none").sum(-1)
+        del score_preds
+
+        # ----------------------- Dynamic K-Matching -----------------------
+        cost_matrix = (
+            cls_cost
+            + 3.0 * reg_cost
             + 100000.0 * (~is_in_boxes_and_center)
         ) # [N, Mp]
 
         (
-            num_fg,
-            gt_matched_classes,         # [num_fg,]
-            pred_ious_this_matching,    # [num_fg,]
-            matched_gt_inds,            # [num_fg,]
+            assigned_labels,         # [num_fg,]
+            assigned_ious,           # [num_fg,]
+            assigned_indexs,         # [num_fg,]
         ) = self.dynamic_k_matching(
-            cost,
+            cost_matrix,
             pair_wise_ious,
             tgt_labels,
             num_gt,
             fg_mask
             )
-        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
-
-        return (
-                gt_matched_classes,
-                fg_mask,
-                pred_ious_this_matching,
-                matched_gt_inds,
-                num_fg,
-        )
+        del cls_cost, cost_matrix, pair_wise_ious, reg_cost
+
+        return fg_mask, assigned_labels, assigned_ious, assigned_indexs
 
 
     def get_in_boxes_info(
@@ -193,15 +175,14 @@ class SimOTA(object):
             matching_matrix[:, anchor_matching_gt > 1] *= 0
             matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
         fg_mask_inboxes = matching_matrix.sum(0) > 0
-        num_fg = fg_mask_inboxes.sum().item()
 
         fg_mask[fg_mask.clone()] = fg_mask_inboxes
 
-        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
-        gt_matched_classes = gt_classes[matched_gt_inds]
+        assigned_indexs = matching_matrix[:, fg_mask_inboxes].argmax(0)
+        assigned_labels = gt_classes[assigned_indexs]
 
-        pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[
+        assigned_ious = (matching_matrix * pair_wise_ious).sum(0)[
             fg_mask_inboxes
         ]
-        return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
+        return assigned_labels, assigned_ious, assigned_indexs
     

+ 22 - 19
models/detectors/yolox/yolox.py

@@ -8,35 +8,34 @@ from .yolox_head import build_head
 from utils.misc import multiclass_nms
 
 
-# YOLOX
 class YOLOX(nn.Module):
-    def __init__(self,
+    def __init__(self, 
                  cfg,
-                 device,
-                 num_classes=20,
-                 conf_thresh=0.01,
-                 nms_thresh=0.5,
-                 topk=100,
-                 trainable=False,
+                 device, 
+                 num_classes = 20, 
+                 conf_thresh = 0.05,
+                 nms_thresh = 0.6,
+                 trainable = False, 
+                 topk = 1000,
                  deploy = False):
         super(YOLOX, self).__init__()
-        # --------- Basic Parameters ----------
+        # ---------------------- Basic Parameters ----------------------
         self.cfg = cfg
         self.device = device
-        self.stride = [8, 16, 32]
+        self.stride = cfg['stride']
         self.num_classes = num_classes
         self.trainable = trainable
         self.conf_thresh = conf_thresh
         self.nms_thresh = nms_thresh
         self.topk = topk
         self.deploy = deploy
-        
+                
         # ------------------- Network Structure -------------------
         ## Backbone network
         self.backbone, feats_dim = build_backbone(cfg, trainable&cfg['pretrained'])
         
-        ## Neck: feature pyramid
-        self.fpn = build_fpn(cfg=cfg, in_dims=feats_dim, out_dim=int(256*cfg['width']))
+        ## Feature pyramid
+        self.fpn = build_fpn(cfg=cfg, in_dims=feats_dim, out_dim=round(256*cfg['width']))
         self.head_dim = self.fpn.out_dim
 
         ## Detection head
@@ -59,6 +58,7 @@ class YOLOX(nn.Module):
                                 for head in self.non_shared_heads
                               ])                 
 
+
     # ---------------------- Basic Functions ----------------------
     ## generate anchor points
     def generate_anchors(self, level, fmp_size):
@@ -134,16 +134,17 @@ class YOLOX(nn.Module):
     # ---------------------- Main Process for Inference ----------------------
     @torch.no_grad()
     def inference_single_image(self, x):
-        # backbone
+        # Backbone network
         pyramid_feats = self.backbone(x)
 
-        # fpn
+        # Feature pyramid
         pyramid_feats = self.fpn(pyramid_feats)
 
-        # non-shared heads
+        # Detection head
         all_obj_preds = []
         all_cls_preds = []
         all_box_preds = []
+        all_anchors = []
         for level, (feat, head) in enumerate(zip(pyramid_feats, self.non_shared_heads)):
             cls_feat, reg_feat = head(feat)
 
@@ -171,6 +172,7 @@ class YOLOX(nn.Module):
             all_obj_preds.append(obj_pred)
             all_cls_preds.append(cls_pred)
             all_box_preds.append(box_pred)
+            all_anchors.append(anchors)
 
         if self.deploy:
             obj_preds = torch.cat(all_obj_preds, dim=0)
@@ -190,17 +192,18 @@ class YOLOX(nn.Module):
             return bboxes, scores, labels
 
 
+    # ---------------------- Main Process for Training ----------------------
     def forward(self, x):
         if not self.trainable:
             return self.inference_single_image(x)
         else:
-            # backbone
+            # Backbone network
             pyramid_feats = self.backbone(x)
 
-            # fpn
+            # Feature pyramid
             pyramid_feats = self.fpn(pyramid_feats)
 
-            # non-shared heads
+            # Detection head
             all_anchors = []
             all_obj_preds = []
             all_cls_preds = []

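`generate_anchors(self, level, fmp_size)` is called during decoding but its body lies outside this hunk; in an anchor-free YOLOX head it conventionally returns the grid-cell coordinates of one pyramid level, which are scaled by the level's stride when boxes are decoded. A minimal sketch under that assumption (written as a simplified free function):

```python
import torch

def generate_anchors_sketch(fmp_size, device):
    # fmp_size: (H, W) of one pyramid level -> [H*W, 2] cell coordinates (x, y)
    fmp_h, fmp_w = fmp_size
    ys, xs = torch.meshgrid(torch.arange(fmp_h), torch.arange(fmp_w), indexing="ij")
    anchors = torch.stack([xs, ys], dim=-1).float().view(-1, 2).to(device)
    return anchors  # multiplied by the level's stride at decode time
```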
+ 45 - 17
models/detectors/yolox/yolox_basic.py

@@ -1,7 +1,9 @@
+import numpy as np
 import torch
 import torch.nn as nn
 
 
+# ---------------------------- 2D CNN ----------------------------
 class SiLU(nn.Module):
     """export-friendly version of nn.SiLU()"""
 
@@ -25,6 +27,8 @@ def get_activation(act_type=None):
         return nn.Mish(inplace=True)
     elif act_type == 'silu':
         return nn.SiLU(inplace=True)
+    elif act_type is None:
+        return nn.Identity()
 
 
 def get_norm(norm_type, dim):
@@ -43,8 +47,8 @@ class Conv(nn.Module):
                  p=0,                  # padding
                  s=1,                  # stride
                  d=1,                  # dilation
-                 act_type='',          # activation
-                 norm_type='',         # normalization
+                 act_type='lrelu',     # activation
+                 norm_type='BN',       # normalization
                  depthwise=False):
         super(Conv, self).__init__()
         convs = []
@@ -77,25 +81,21 @@ class Conv(nn.Module):
         return self.convs(x)
 
 
-# ConvBlocks
+# ---------------------------- YOLOv5 Modules ----------------------------
+## Bottleneck
 class Bottleneck(nn.Module):
     def __init__(self,
                  in_dim,
                  out_dim,
                  expand_ratio=0.5,
-                 kernel=[1, 3],
                  shortcut=False,
+                 depthwise=False,
                  act_type='silu',
-                 norm_type='BN',
-                 depthwise=False):
+                 norm_type='BN'):
         super(Bottleneck, self).__init__()
         inter_dim = int(out_dim * expand_ratio)  # hidden channels            
-        self.cv1 = Conv(in_dim, inter_dim, k=kernel[0], p=kernel[0]//2,
-                        norm_type=norm_type, act_type=act_type,
-                        depthwise=False if kernel[0] == 1 else depthwise)
-        self.cv2 = Conv(inter_dim, out_dim, k=kernel[1], p=kernel[1]//2,
-                        norm_type=norm_type, act_type=act_type,
-                        depthwise=False if kernel[1] == 1 else depthwise)
+        self.cv1 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
+        self.cv2 = Conv(inter_dim, out_dim, k=3, p=1, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
         self.shortcut = shortcut and in_dim == out_dim
 
     def forward(self, x):
@@ -103,14 +103,12 @@ class Bottleneck(nn.Module):
 
         return x + h if self.shortcut else h
 
-
-# CSP-stage block
+## CSP-stage block
 class CSPBlock(nn.Module):
     def __init__(self,
                  in_dim,
                  out_dim,
                  expand_ratio=0.5,
-                 kernel=[1, 3],
                  nblocks=1,
                  shortcut=False,
                  depthwise=False,
@@ -122,7 +120,7 @@ class CSPBlock(nn.Module):
         self.cv2 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
         self.cv3 = Conv(2 * inter_dim, out_dim, k=1, norm_type=norm_type, act_type=act_type)
         self.m = nn.Sequential(*[
-            Bottleneck(inter_dim, inter_dim, expand_ratio=1.0, kernel=kernel, shortcut=shortcut,
+            Bottleneck(inter_dim, inter_dim, expand_ratio=1.0, shortcut=shortcut,
                        norm_type=norm_type, act_type=act_type, depthwise=depthwise)
                        for _ in range(nblocks)
                        ])
@@ -134,4 +132,34 @@ class CSPBlock(nn.Module):
         out = self.cv3(torch.cat([x3, x2], dim=1))
 
         return out
-    
+    
+
+# ---------------------------- FPN Modules ----------------------------
+## build fpn's core block
+def build_fpn_block(cfg, in_dim, out_dim):
+    if cfg['fpn_core_block'] == 'CSPBlock':
+        layer = CSPBlock(in_dim=in_dim,
+                         out_dim=out_dim,
+                         expand_ratio=0.5,
+                         nblocks = round(3*cfg['depth']),
+                         shortcut = False,
+                         act_type=cfg['fpn_act'],
+                         norm_type=cfg['fpn_norm'],
+                         depthwise=cfg['fpn_depthwise']
+                         )
+        
+    return layer
+
+## build fpn's reduce layer
+def build_reduce_layer(cfg, in_dim, out_dim):
+    if cfg['fpn_reduce_layer'] == 'Conv':
+        layer = Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
+        
+    return layer
+
+## build fpn's downsample layer
+def build_downsample_layer(cfg, in_dim, out_dim):
+    if cfg['fpn_downsample_layer'] == 'Conv':
+        layer = Conv(in_dim, out_dim, k=3, s=2, p=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
+        
+    return layer

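The three new `build_*` helpers move every structural choice of the FPN into the config dict. Collecting the keys this file now reads (the key names come from the code above; the values are illustrative):

```python
fpn_cfg = {
    'width': 1.0,                      # channel multiplier
    'depth': 1.0,                      # CSPBlock uses nblocks = round(3 * depth)
    'fpn_core_block': 'CSPBlock',
    'fpn_reduce_layer': 'Conv',
    'fpn_downsample_layer': 'Conv',
    'fpn_act': 'silu',
    'fpn_norm': 'BN',
    'fpn_depthwise': False,
}
```

Note that each builder leaves `layer` unbound when its `cfg` value is unrecognized and fails with `UnboundLocalError`; an explicit `else: raise NotImplementedError(...)` would fail more descriptively.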
+ 15 - 79
models/detectors/yolox/yolox_head.py

@@ -1,9 +1,7 @@
 import torch
 import torch.nn as nn
-try:
-    from .yolox_basic import Conv
-except:
-    from yolox_basic import Conv
+
+from .yolox_basic import Conv
 
 
 class DecoupledHead(nn.Module):
@@ -11,47 +9,46 @@ class DecoupledHead(nn.Module):
         super().__init__()
         print('==============================')
         print('Head: Decoupled Head')
+        # --------- Basic Parameters ----------
         self.in_dim = in_dim
         self.num_cls_head=cfg['num_cls_head']
         self.num_reg_head=cfg['num_reg_head']
-        self.act_type=cfg['head_act']
-        self.norm_type=cfg['head_norm']
 
-        # cls head
+        # --------- Network Parameters ----------
+        ## cls head
         cls_feats = []
         self.cls_out_dim = max(out_dim, num_classes)
         for i in range(cfg['num_cls_head']):
             if i == 0:
                 cls_feats.append(
                     Conv(in_dim, self.cls_out_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
+                        act_type=cfg['head_act'],
+                        norm_type=cfg['head_norm'],
                         depthwise=cfg['head_depthwise'])
                         )
             else:
                 cls_feats.append(
                     Conv(self.cls_out_dim, self.cls_out_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
+                        act_type=cfg['head_act'],
+                        norm_type=cfg['head_norm'],
                         depthwise=cfg['head_depthwise'])
-                        )
-                
-        # reg head
+                        )      
+        ## reg head
         reg_feats = []
         self.reg_out_dim = max(out_dim, 64)
         for i in range(cfg['num_reg_head']):
             if i == 0:
                 reg_feats.append(
                     Conv(in_dim, self.reg_out_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
+                        act_type=cfg['head_act'],
+                        norm_type=cfg['head_norm'],
                         depthwise=cfg['head_depthwise'])
                         )
             else:
                 reg_feats.append(
                     Conv(self.reg_out_dim, self.reg_out_dim, k=3, p=1, s=1, 
-                        act_type=self.act_type,
-                        norm_type=self.norm_type,
+                        act_type=cfg['head_act'],
+                        norm_type=cfg['head_norm'],
                         depthwise=cfg['head_depthwise'])
                         )
 
@@ -74,64 +71,3 @@ def build_head(cfg, in_dim, out_dim, num_classes=80):
     head = DecoupledHead(cfg, in_dim, out_dim, num_classes) 
 
     return head
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'head_depthwise': False,
-        'reg_max': 16,
-    }
-    fpn_dims = [256, 512, 512]
-    # Head-1
-    model = build_head(cfg, 256, fpn_dims, num_classes=80)
-    x = torch.randn(1, 256, 80, 80)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Head-1: Params : {:.2f} M'.format(params / 1e6))
-
-    # Head-2
-    model = build_head(cfg, 512, fpn_dims, num_classes=80)
-    x = torch.randn(1, 512, 40, 40)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('Head-2: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Head-2: Params : {:.2f} M'.format(params / 1e6))
-
-    # Head-3
-    model = build_head(cfg, 512, fpn_dims, num_classes=80)
-    x = torch.randn(1, 512, 20, 20)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('Head-3: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Head-3: Params : {:.2f} M'.format(params / 1e6))

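The deleted `__main__` block was the only usage example of the head; for reference, it is driven as below (shapes mirror the removed test and assume the forward returns the two branch features, as the call sites in `yolox.py` suggest):

```python
import torch

cfg = {'num_cls_head': 2, 'num_reg_head': 2,
       'head_act': 'silu', 'head_norm': 'BN', 'head_depthwise': False}
head = build_head(cfg, in_dim=256, out_dim=256, num_classes=80)

x = torch.randn(1, 256, 80, 80)   # one P3 feature map
cls_feat, reg_feat = head(x)      # two [1, 256, 80, 80] branch outputs
```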
+ 48 - 100
models/detectors/yolox/yolox_pafpn.py

@@ -1,124 +1,80 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-try:
-    from .yolox_basic import Conv, CSPBlock
-except:
-    from yolox_basic import Conv, CSPBlock
 
+from .yolox_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
 
-# PaFPN-CSP
+
+# YOLO-Style PaFPN
 class Yolov5PaFPN(nn.Module):
-    def __init__(self, 
-                 in_dims=[256, 512, 1024],
-                 out_dim=256,
-                 width=1.0,
-                 depth=1.0,
-                 act_type='silu',
-                 norm_type='BN',
-                 depthwise=False):
+    def __init__(self, cfg, in_dims=[256, 512, 1024], out_dim=None):
         super(Yolov5PaFPN, self).__init__()
+        # --------------------------- Basic Parameters ---------------------------
         self.in_dims = in_dims
-        self.out_dim = out_dim
         c3, c4, c5 = in_dims
-
-        # top dwon
-        ## P5 -> P4
-        self.reduce_layer_1 = Conv(c5, int(512*width), k=1, norm_type=norm_type, act_type=act_type)
-        self.top_down_layer_1 = CSPBlock(c4 + int(512*width),
-                                         int(512*width),
-                                         expand_ratio=0.5,
-                                         kernel=[1, 3],
-                                         nblocks=int(3*depth),
-                                         shortcut=False,
-                                         act_type=act_type,
-                                         norm_type=norm_type,
-                                         depthwise=depthwise
-                                         )
-
-        ## P4 -> P3
-        self.reduce_layer_2 = Conv(c4, int(256*width), k=1, norm_type=norm_type, act_type=act_type)  # 14
-        self.top_down_layer_2 = CSPBlock(c3 + int(256*width),
-                                         int(256*width),
-                                         expand_ratio=0.5,
-                                         kernel=[1, 3],
-                                         nblocks=int(3*depth),
-                                         shortcut=False,
-                                         act_type=act_type,
-                                         norm_type=norm_type,
-                                         depthwise=depthwise
-                                         )
-
-        # bottom up
-        ## P3 -> P4
-        self.reduce_layer_3 = Conv(int(256*width), int(256*width), k=3, p=1, s=2,
-                                   act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        self.bottom_up_layer_1 = CSPBlock(int(256*width) + int(256*width),
-                                         int(512*width),
-                                         expand_ratio=0.5,
-                                         kernel=[1, 3],
-                                         nblocks=int(3*depth),
-                                         shortcut=False,
-                                         act_type=act_type,
-                                         norm_type=norm_type,
-                                         depthwise=depthwise
-                                         )
-
-        ## P4 -> P5
-        self.reduce_layer_4 = Conv(int(512*width), int(512*width), k=3, p=1, s=2,
-                                   act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        self.bottom_up_layer_2 = CSPBlock(int(512*width) + int(512*width),
-                                         int(1024*width),
-                                         expand_ratio=0.5,
-                                         kernel=[1, 3],
-                                         nblocks=int(3*depth),
-                                         shortcut=False,
-                                         act_type=act_type,
-                                         norm_type=norm_type,
-                                         depthwise=depthwise
-                                         )
-
-        # output proj layers
+        width = cfg['width']
+
+        # --------------------------- Network Parameters ---------------------------
+        ## top down
+        ### P5 -> P4
+        self.reduce_layer_1 = build_reduce_layer(cfg, c5, round(512*width))
+        self.top_down_layer_1 = build_fpn_block(cfg, c4 + round(512*width), round(512*width))
+
+        ### P4 -> P3
+        self.reduce_layer_2 = build_reduce_layer(cfg, round(512*width), round(256*width))
+        self.top_down_layer_2 = build_fpn_block(cfg, c3 + round(256*width), round(256*width))
+
+        ## bottom up
+        ### P3 -> P4
+        self.downsample_layer_1 = build_downsample_layer(cfg, round(256*width), round(256*width))
+        self.bottom_up_layer_1 = build_fpn_block(cfg, round(256*width) + round(256*width), round(512*width))
+
+        ### P4 -> P5
+        self.downsample_layer_2 = build_downsample_layer(cfg, round(512*width), round(512*width))
+        self.bottom_up_layer_2 = build_fpn_block(cfg, round(512*width) + round(512*width), round(1024*width))
+                
+        ## output proj layers
         if out_dim is not None:
-            # output proj layers
             self.out_layers = nn.ModuleList([
                 Conv(in_dim, out_dim, k=1,
-                        norm_type=norm_type, act_type=act_type)
-                        for in_dim in [int(256 * width), int(512 * width), int(1024 * width)]
-                        ])
+                     act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
+                     for in_dim in [round(256*width), round(512*width), round(1024*width)]
+                     ])
             self.out_dim = [out_dim] * 3
-
         else:
             self.out_layers = None
-            self.out_dim = [int(256 * width), int(512 * width), int(1024 * width)]
+            self.out_dim = [round(256*width), round(512*width), round(1024*width)]
 
 
     def forward(self, features):
         c3, c4, c5 = features
 
+        # Top down
+        ## P5 -> P4
         c6 = self.reduce_layer_1(c5)
-        c7 = F.interpolate(c6, scale_factor=2.0)   # s32->s16
+        c7 = F.interpolate(c6, scale_factor=2.0)
         c8 = torch.cat([c7, c4], dim=1)
         c9 = self.top_down_layer_1(c8)
-        # P3/8
+        ## P4 -> P3
         c10 = self.reduce_layer_2(c9)
-        c11 = F.interpolate(c10, scale_factor=2.0)   # s16->s8
+        c11 = F.interpolate(c10, scale_factor=2.0)
         c12 = torch.cat([c11, c3], dim=1)
-        c13 = self.top_down_layer_2(c12)  # to det
-        # p4/16
-        c14 = self.reduce_layer_3(c13)
+        c13 = self.top_down_layer_2(c12)
+
+        # Bottom up
+        ## P3 -> P4
+        c14 = self.downsample_layer_1(c13)
         c15 = torch.cat([c14, c10], dim=1)
-        c16 = self.bottom_up_layer_1(c15)  # to det
-        # p5/32
-        c17 = self.reduce_layer_4(c16)
+        c16 = self.bottom_up_layer_1(c15)
+        ## P4 -> P5
+        c17 = self.downsample_layer_2(c16)
         c18 = torch.cat([c17, c6], dim=1)
-        c19 = self.bottom_up_layer_2(c18)  # to det
+        c19 = self.bottom_up_layer_2(c18)
 
         out_feats = [c13, c16, c19] # [P3, P4, P5]
-
+        
         # output proj layers
         if self.out_layers is not None:
-            # output proj layers
             out_feats_proj = []
             for feat, layer in zip(out_feats, self.out_layers):
                 out_feats_proj.append(layer(feat))
@@ -129,16 +85,8 @@ class Yolov5PaFPN(nn.Module):
 
 def build_fpn(cfg, in_dims, out_dim=None):
     model = cfg['fpn']
-    # build neck
+    # build pafpn
     if model == 'yolov5_pafpn':
-        fpn_net = Yolov5PaFPN(in_dims=in_dims,
-                             out_dim=out_dim,
-                             width=cfg['width'],
-                             depth=cfg['depth'],
-                             act_type=cfg['fpn_act'],
-                             norm_type=cfg['fpn_norm'],
-                             depthwise=cfg['fpn_depthwise']
-                             )
-
+        fpn_net = Yolov5PaFPN(cfg, in_dims, out_dim)
 
     return fpn_net

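The switch of `reduce_layer_2`'s input from `c4` to `round(512*width)` is likely the "unknown bug" named in the commit message: `reduce_layer_2` actually consumes `c9` (the output of `top_down_layer_1`, which has `round(512*width)` channels), so the old `Conv(c4, ...)` only matched by coincidence at `width = 1.0`. End to end the PaFPN preserves the three input resolutions and only reshuffles channels; for a 640×640 input with `width = 1.0`, a shape check looks like this (the `cfg` keys follow the builders in `yolox_basic.py` and are illustrative):

```python
import torch

cfg = {'fpn': 'yolov5_pafpn', 'width': 1.0, 'depth': 1.0,
       'fpn_core_block': 'CSPBlock', 'fpn_reduce_layer': 'Conv',
       'fpn_downsample_layer': 'Conv', 'fpn_act': 'silu',
       'fpn_norm': 'BN', 'fpn_depthwise': False}
feats = [torch.randn(2, 256, 80, 80),    # c3, stride 8
         torch.randn(2, 512, 40, 40),    # c4, stride 16
         torch.randn(2, 1024, 20, 20)]   # c5, stride 32
fpn = build_fpn(cfg, in_dims=[256, 512, 1024], out_dim=256)
p3, p4, p5 = fpn(feats)
# p3: [2, 256, 80, 80], p4: [2, 256, 40, 40], p5: [2, 256, 20, 20]
# (all three projected to 256 channels by the 1x1 out_layers)
```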
+ 3 - 3
train.py

@@ -107,11 +107,11 @@ def train():
     print("----------------------------------------------------------")
 
     # Build DDP
-    world_size = distributed_utils.get_world_size()
-    print('World size: {}'.format(world_size))
     if args.distributed:
         distributed_utils.init_distributed_mode(args)
         print("git:\n  {}\n".format(distributed_utils.get_sha()))
+    world_size = distributed_utils.get_world_size()
+    print('World size: {}'.format(world_size))
 
     # Build CUDA
     if args.cuda:
@@ -150,7 +150,7 @@ def train():
         dist.barrier()
 
     # Build Trainer
-    trainer = build_trainer(args, data_cfg, model_cfg, trans_cfg, device, model_without_ddp, criterion)
+    trainer = build_trainer(args, data_cfg, model_cfg, trans_cfg, device, model_without_ddp, criterion, world_size)
 
     # --------------------------------- Train: Start ---------------------------------
     ## Eval before training

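The reorder in `train.py` matters because `get_world_size()` reads `torch.distributed` state that only exists after `init_distributed_mode(args)` has run `init_process_group()`; querying it first always reported a world size of 1 under DDP. A sketch of the usual fallback behavior, assuming `distributed_utils.get_world_size` follows the common DETR-style helper:

```python
import torch.distributed as dist

def get_world_size():
    if not (dist.is_available() and dist.is_initialized()):
        return 1   # fallback hit by the old call order
    return dist.get_world_size()
```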
+ 1 - 1
train_ddp.sh

@@ -5,7 +5,7 @@ python -m torch.distributed.run --nproc_per_node=8 train.py \
                                                     -dist \
                                                     -d coco \
                                                     --root /data/datasets/ \
-                                                    -m yolo_free_v1_l \
+                                                    -m yolox_l \
                                                     -bs 128 \
                                                     -size 640 \
                                                     --wp_epoch 3 \

+ 2 - 2
utils/misc.py

@@ -202,11 +202,11 @@ class ModelEMA(object):
     For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
     """
 
-    def __init__(self, model, decay=0.9999, tau=2000, updates=0):
+    def __init__(self, cfg, model, updates=0):
         # Create EMA
         self.ema = deepcopy(self.de_parallel(model)).eval()  # FP32 EMA
         self.updates = updates  # number of EMA updates
-        self.decay = lambda x: decay * (1 - math.exp(-x / tau))  # decay exponential ramp (to help early epochs)
+        self.decay = lambda x: cfg['ema_decay'] * (1 - math.exp(-x / cfg['ema_tau']))  # decay exponential ramp (to help early epochs)
         for p in self.ema.parameters():
             p.requires_grad_(False)
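The EMA decay ramps from 0 toward `cfg['ema_decay']` with time constant `cfg['ema_tau']`, so early updates track the raw weights closely and later updates average over a long horizon. With the old defaults (`decay=0.9999`, `tau=2000`) moved into the config, the ramp behaves as follows:

```python
import math

decay = lambda x: 0.9999 * (1.0 - math.exp(-x / 2000))
print(round(decay(100), 4))     # 0.0488 -> EMA follows the raw weights early on
print(round(decay(2000), 4))    # 0.6321 -> ramping up
print(round(decay(20000), 4))   # 0.9999 -> converges to the configured decay
```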