yjh0410 1 year ago
parent
revision
edc44c7680

+ 1 - 5
config/__init__.py

@@ -97,8 +97,7 @@ from .model_config.yolov5_config import yolov5_cfg, yolov5_adamw_cfg
 from .model_config.yolov7_config import yolov7_cfg
 from .model_config.yolov8_config import yolov8_cfg
 from .model_config.yolox_config import yolox_cfg, yolox_adamw_cfg
-## My RTCDet series
-from .model_config.rtcdet_config import rtcdet_cfg, rtcdet_seg_cfg, rtcdet_pos_cfg, rtcdet_seg_pos_cfg
+## Real-time DETR series
 from .model_config.rtdetr_config import rtdetr_cfg
 from .model_config.rtpdetr_config import rtpdetr_cfg
 
@@ -135,9 +134,6 @@ def build_model_config(args):
     # YOLOX-AdamW
     elif args.model in ['yolox_n_adamw', 'yolox_s_adamw', 'yolox_m_adamw', 'yolox_l_adamw', 'yolox_x_adamw']:
         cfg = yolox_adamw_cfg[args.model]
-    # RTCDet
-    elif args.model in ['rtcdet_n', 'rtcdet_t', 'rtcdet_s', 'rtcdet_m', 'rtcdet_l', 'rtcdet_x']:
-        cfg = rtcdet_cfg[args.model]
     # RT-DETR
     elif args.model in ['rtdetr_r18', 'rtdetr_r34', 'rtdetr_r50', 'rtdetr_r101']:
         cfg = rtdetr_cfg[args.model]

+ 0 - 458
config/model_config/rtcdet_config.py

@@ -1,458 +0,0 @@
-# Real-time Convolution Object Detector
-
-
-# ------------------- Det task --------------------
-rtcdet_cfg = {
-    'rtcdet_n':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'ratio': 2.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'det_head': {'name': 'decoupled_head',
-                     'num_cls_head': 2,
-                     'num_reg_head': 2,
-                     'head_act': 'silu',
-                     'head_norm': 'BN',
-                     'head_depthwise': False,
-                     'reg_max': 16,  
-                     },
-        'seg_head': {'name': None,
-                     },
-        'pos_head': {'name': None,
-                     },
-        # ---------------- Train config ----------------
-        ## input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolov5_n',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_box_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtcdet',
-    },
-
-    'rtcdet_s':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.50,
-        'depth': 0.34,
-        'ratio': 2.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'det_head': {'name': 'decoupled_head',
-                     'num_cls_head': 2,
-                     'num_reg_head': 2,
-                     'head_act': 'silu',
-                     'head_norm': 'BN',
-                     'head_depthwise': False,
-                     'reg_max': 16,  
-                     },
-        'seg_head': {'name': None,
-                     },
-        'pos_head': {'name': None,
-                     },
-        # ---------------- Train config ----------------
-        ## input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolov5_s',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_box_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtcdet',
-    },
-
-    'rtcdet_m':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.75,
-        'depth': 0.67,
-        'ratio': 1.5,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'det_head': {'name': 'decoupled_head',
-                     'num_cls_head': 2,
-                     'num_reg_head': 2,
-                     'head_act': 'silu',
-                     'head_norm': 'BN',
-                     'head_depthwise': False,
-                     'reg_max': 16,  
-                     },
-        'seg_head': {'name': None,
-                     },
-        'pos_head': {'name': None,
-                     },
-        # ---------------- Train config ----------------
-        ## input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolov5_m',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_box_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtcdet',
-    },
-
-    'rtcdet_l':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'ratio': 1.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'det_head': {'name': 'decoupled_head',
-                     'num_cls_head': 2,
-                     'num_reg_head': 2,
-                     'head_act': 'silu',
-                     'head_norm': 'BN',
-                     'head_depthwise': False,
-                     'reg_max': 16,  
-                     },
-        'seg_head': {'name': None,
-                     },
-        'pos_head': {'name': None,
-                     },
-        # ---------------- Train config ----------------
-        ## input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolov5_l',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_box_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtcdet',
-    },
-
-    'rtcdet_x':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 1.25,
-        'depth': 1.34,
-        'ratio': 1.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'det_head': {'name': 'decoupled_head',
-                     'num_cls_head': 2,
-                     'num_reg_head': 2,
-                     'head_act': 'silu',
-                     'head_norm': 'BN',
-                     'head_depthwise': False,
-                     'reg_max': 16,  
-                     },
-        'seg_head': {'name': None,
-                     },
-        'pos_head': {'name': None,
-                     },
-        # ---------------- Train config ----------------
-        ## input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolov5_x',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_box_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtcdet',
-    },
-
-}
-
-
-# ------------------- Det + Seg task -------------------
-rtcdet_seg_cfg = {
-    'rtcdet_seg_n':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'ratio': 2.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'det_head': {'name': 'decoupled_head',
-                     'num_cls_head': 2,
-                     'num_reg_head': 2,
-                     'head_act': 'silu',
-                     'head_norm': 'BN',
-                     'head_depthwise': False,
-                     'reg_max': 16,  
-                     },
-        'seg_head': {'name': None,
-                     },
-        'pos_head': {'name': None,
-                     },
-        # ---------------- Train config ----------------
-        ## input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolov5_n',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_box_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtcdet',
-    },
-}
-
-
-# ------------------- Det + Pos task -------------------
-rtcdet_pos_cfg = {
-    'rtcdet_pos_n':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'ratio': 2.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'det_head': {'name': 'decoupled_head',
-                     'num_cls_head': 2,
-                     'num_reg_head': 2,
-                     'head_act': 'silu',
-                     'head_norm': 'BN',
-                     'head_depthwise': False,
-                     'reg_max': 16,  
-                     },
-        'seg_head': {'name': None,
-                     },
-        'pos_head': {'name': None,
-                     },
-        # ---------------- Train config ----------------
-        ## input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolov5_n',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_box_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtcdet',
-    },
-}
-
-
-# ------------------- Det + Seg + Pos task -------------------
-rtcdet_seg_pos_cfg = {
-    'rtcdet_seg_pos_n':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'ratio': 2.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'det_head': {'name': 'decoupled_head',
-                     'num_cls_head': 2,
-                     'num_reg_head': 2,
-                     'head_act': 'silu',
-                     'head_norm': 'BN',
-                     'head_depthwise': False,
-                     'reg_max': 16,  
-                     },
-        'seg_head': {'name': None,
-                     },
-        'pos_head': {'name': None,
-                     },
-        # ---------------- Train config ----------------
-        ## input
-        'multi_scale': [0.5, 1.5],   # 320 -> 960
-        'trans_type': 'yolov5_n',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_box_weight': 7.5,
-        'loss_dfl_weight': 1.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtcdet',
-    },
-}

+ 1 - 6
models/detectors/__init__.py

@@ -11,8 +11,7 @@ from .yolov5.build import build_yolov5
 from .yolov7.build import build_yolov7
 from .yolov8.build import build_yolov8
 from .yolox.build import build_yolox
-# My RTCDet series
-from .rtcdet.build import build_rtcdet
+# Real-time DETR series
 from .rtdetr.build import build_rtdetr
 from .rtpdetr.build import build_rtpdetr
 
@@ -64,10 +63,6 @@ def build_model(args,
     elif args.model in ['yolox_n_adamw', 'yolox_s_adamw', 'yolox_m_adamw', 'yolox_l_adamw', 'yolox_x_adamw']:
         model, criterion = build_yolox(
             args, model_cfg, device, num_classes, trainable, deploy)
-    # RTCDet
-    elif args.model in ['rtcdet_n', 'rtcdet_t', 'rtcdet_s', 'rtcdet_m', 'rtcdet_l', 'rtcdet_x']:
-        model, criterion = build_rtcdet(
-            args, model_cfg, device, num_classes, trainable, deploy)
     # RT-DETR
     elif args.model in ['rtdetr_r18', 'rtdetr_r34', 'rtdetr_r50', 'rtdetr_r101']:
         model, criterion = build_rtdetr(

+ 0 - 69
models/detectors/rtcdet/README.md

@@ -1,69 +0,0 @@
-# RTCDet:
-
-## Effectiveness of the pretrained weight
-- **IN1K**: We pretrain the backbone (RTCNet) on the ImageNet-1K dataset with a standard classification task setting.
-- **Scratch**: We train the detector on COCO without any pretrained weights for the backbone.
-
-For the small model:
-|   Model  | Pretrained | Scale | Epoch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 |
-|----------|------------|-------|-------|------------------------|-------------------|
-| RTCDet-N | -          |  640  |  500  |          37.0          |        52.9       |
-| RTCDet-N | IN1K       |  640  |  500  |                        |                   |
-| RTCDet-L | -          |  640  |  500  |          50.2          |        68.0       |
-| RTCDet-L | IN1K       |  640  |  500  |                        |                   |
-
-## Results on the COCO-val
-|   Model  | Batch | Scale | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|----------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
-| RTCDet-N | 8xb16 |  640  |          37.0          |        52.9       |        8.8        |         3.2        |  |
-| RTCDet-S | 8xb16 |  640  |                        |                   |                   |                    |  |
-| RTCDet-M | 8xb16 |  640  |                        |                   |                   |                    |  |
-| RTCDet-L | 8xb16 |  640  |                        |                   |                   |                    |  |
-| RTCDet-X | 8xb16 |  640  |          50.7          |        68.3       |       165.7       |         43.7       |  |
-
-- For the backbone, we use the ImageNet-1K pretrained weight.
-- For training, we train the RTCDet series for 300 epochs on COCO.
-- For data augmentation, we use large-scale jitter (LSJ), Mosaic, and Mixup, following YOLOv8.
-- For the optimizer, we use AdamW with a weight decay of 0.05 and a base per-image learning rate of 0.001 / 64 (see the sketch after this list).
-- For the learning rate scheduler, we use linear decay.
-
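Below is a minimal sketch of how the optimizer and scheduler described above could be instantiated. It assumes the base per-image learning rate of 0.001 / 64 is scaled linearly with the total batch size; the function names and arguments are illustrative, not taken from this repository.

```python
import torch

def build_adamw(model: torch.nn.Module, global_batch_size: int):
    # Base per-image lr of 0.001 / 64, scaled linearly with the total batch size.
    base_lr_per_image = 0.001 / 64
    lr = base_lr_per_image * global_batch_size
    return torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=0.05)

def build_linear_decay(optimizer, max_epochs: int):
    # Linearly decay the learning rate from its base value toward zero.
    return torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=lambda epoch: 1.0 - epoch / max_epochs)
```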
-## Train RTCDet
-### Single GPU
-Taking training RTCDet-S on COCO as the example,
-```Shell
-python train.py --cuda -d coco --root path/to/coco -m rtcdet_s -bs 16 -size 640 --wp_epoch 3 --max_epoch 300 --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --multi_scale 
-```
-
-### Multi GPU
-Taking training RTCDet-S on COCO as the example,
-```Shell
-python -m torch.distributed.run --nproc_per_node=8 train.py --cuda -dist -d coco --root /data/datasets/ -m rtcdet_s -bs 128 -size 640 --wp_epoch 3 --max_epoch 300  --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --sybn --multi_scale --save_folder weights/ 
-```
-
-## Test RTCDet
-Taking testing RTCDet-S on COCO-val as the example,
-```Shell
-python test.py --cuda -d coco --root path/to/coco -m rtcdet_s --weight path/to/RTCDet_s.pth -size 640 -vt 0.4 --show 
-```
-
-## Evaluate RTCDet
-Taking evaluating RTCDet-S on COCO-val as the example,
-```Shell
-python eval.py --cuda -d coco-val --root path/to/coco -m rtcdet_s --weight path/to/RTCDet_s.pth 
-```
-
-## Demo
-### Detect with Image
-```Shell
-python demo.py --mode image --path_to_img path/to/image_dirs/ --cuda -m rtcdet_s --weight path/to/weight -size 640 -vt 0.4 --show
-```
-
-### Detect with Video
-```Shell
-python demo.py --mode video --path_to_vid path/to/video --cuda -m rtcdet_s --weight path/to/weight -size 640 -vt 0.4 --show --gif
-```
-
-### Detect with Camera
-```Shell
-python demo.py --mode camera --cuda -m rtcdet_s --weight path/to/weight -size 640 -vt 0.4 --show --gif
-```

+ 0 - 44
models/detectors/rtcdet/build.py

@@ -1,44 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-import torch
-import torch.nn as nn
-
-from .loss import build_criterion
-from .rtcdet import RTCDet
-
-
-# build object detector
-def build_rtcdet(args, cfg, device, num_classes=80, trainable=False, deploy=False):
-    print('==============================')
-    print('Build {} ...'.format(args.model.upper()))
-    
-    print('==============================')
-    print('Model Configuration: \n', cfg)
-    
-    # -------------- Build YOLO --------------
-    model = RTCDet(cfg                = cfg,
-                   device             = device, 
-                   num_classes        = num_classes,
-                   trainable          = trainable,
-                   conf_thresh        = args.conf_thresh,
-                   nms_thresh         = args.nms_thresh,
-                   topk               = args.topk,
-                   deploy             = deploy,
-                   no_multi_labels    = args.no_multi_labels,
-                   nms_class_agnostic = args.nms_class_agnostic
-                   )
-
-    # -------------- Initialize YOLO --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if trainable:
-        # build criterion for training
-        criterion = build_criterion(cfg, device, num_classes)
-        
-    return model, criterion

+ 0 - 268
models/detectors/rtcdet/loss.py

@@ -1,268 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-from utils.box_ops import bbox2dist, bbox_iou
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-from .matcher import TaskAlignedAssigner
-
-
-class Criterion(object):
-    def __init__(self, cfg, device, num_classes=80):
-        # --------------- Basic parameters ---------------
-        self.cfg = cfg
-        self.device = device
-        self.num_classes = num_classes
-        self.reg_max = cfg['det_head']['reg_max']
-        # --------------- Loss config ---------------
-        self.loss_cls_weight = cfg['loss_cls_weight']
-        self.loss_box_weight = cfg['loss_box_weight']
-        self.loss_dfl_weight = cfg['loss_dfl_weight']
-        # --------------- Matcher config ---------------
-        self.matcher_hpy = cfg['matcher_hpy']
-        self.matcher = TaskAlignedAssigner(num_classes     = num_classes,
-                                           topk_candidates = self.matcher_hpy['topk_candidates'],
-                                           alpha           = self.matcher_hpy['alpha'],
-                                           beta            = self.matcher_hpy['beta']
-                                           )
-
-    # -------------------- Basic loss functions --------------------
-    def loss_classes(self, pred_cls, gt_score):
-        # Compute BCE loss
-        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_score, reduction='none')
-
-        return loss_cls
-    
-    def loss_bboxes(self, pred_box, gt_box, bbox_weight):
-        # regression loss
-        ious = bbox_iou(pred_box, gt_box, xywh=False, CIoU=True)
-        loss_box = (1.0 - ious.squeeze(-1)) * bbox_weight
-
-        return loss_box
-    
-    def loss_dfl(self, pred_reg, gt_box, anchor, stride, bbox_weight=None):
-        # rescale coords by stride
-        gt_box_s = gt_box / stride
-        anchor_s = anchor / stride
-
-        # compute deltas
-        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.reg_max - 1)
-
-        gt_left = gt_ltrb_s.to(torch.long)
-        gt_right = gt_left + 1
-
-        weight_left = gt_right.to(torch.float) - gt_ltrb_s
-        weight_right = 1 - weight_left
-
-        # loss left
-        loss_left = F.cross_entropy(
-            pred_reg.view(-1, self.reg_max),
-            gt_left.view(-1),
-            reduction='none').view(gt_left.shape) * weight_left
-        # loss right
-        loss_right = F.cross_entropy(
-            pred_reg.view(-1, self.reg_max),
-            gt_right.view(-1),
-            reduction='none').view(gt_left.shape) * weight_right
-
-        loss_dfl = (loss_left + loss_right).mean(-1)
-        
-        if bbox_weight is not None:
-            loss_dfl *= bbox_weight
-
-        return loss_dfl
-
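For reference, the two weighted cross-entropy terms in `loss_dfl` implement the Distribution Focal Loss from Generalized Focal Loss. For a stride-normalized target distance $y$ with neighboring integer bins $y_l = \lfloor y \rfloor$ and $y_r = y_l + 1$,

$$\mathrm{DFL}(y) = -\big((y_r - y)\log S_{y_l} + (y - y_l)\log S_{y_r}\big),$$

where $S$ is the softmax over the `reg_max` bins of `pred_reg`. The loss is averaged over the four box sides and optionally weighted by `bbox_weight`.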
-    def compute_det_loss(self, outputs, targets):        
-        """
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_reg']: List(Tensor) [B, M, 4*(reg_max+1)]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['anchors']: List(Tensor) [M, 2]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            outputs['stride_tensor']: List(Tensor) [M, 1]
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs = outputs['pred_cls'][0].shape[0]
-        device = outputs['pred_cls'][0].device
-        strides = outputs['stride_tensor']
-        anchors = outputs['anchors']
-        anchors = torch.cat(anchors, dim=0)
-        num_anchors = anchors.shape[0]
-
-        # preds: [B, M, C]
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-        
-        # --------------- label assignment ---------------
-        gt_score_targets = []
-        gt_bbox_targets = []
-        fg_masks = []
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)     # [Mp,]
-            tgt_boxs = targets[batch_idx]["boxes"].to(device)        # [Mp, 4]
-
-            # check target
-            if len(tgt_labels) == 0 or tgt_boxs.max().item() == 0.:
-                # There is no valid gt
-                fg_mask = cls_preds.new_zeros(1, num_anchors).bool()                #[1, M,]
-                gt_score = cls_preds.new_zeros((1, num_anchors, self.num_classes))  #[1, M, C]
-                gt_box = cls_preds.new_zeros((1, num_anchors, 4))                   #[1, M, 4]
-            else:
-                tgt_labels = tgt_labels[None, :, None]      # [1, Mp, 1]
-                tgt_boxs = tgt_boxs[None]                   # [1, Mp, 4]
-                (
-                    _,
-                    gt_box,     # [1, M, 4]
-                    gt_score,   # [1, M, C]
-                    fg_mask,    # [1, M,]
-                    _
-                ) = self.matcher(
-                    pd_scores = cls_preds[batch_idx:batch_idx+1].detach().sigmoid(), 
-                    pd_bboxes = box_preds[batch_idx:batch_idx+1].detach(),
-                    anc_points = anchors,
-                    gt_labels = tgt_labels,
-                    gt_bboxes = tgt_boxs
-                    )
-            gt_score_targets.append(gt_score)
-            gt_bbox_targets.append(gt_box)
-            fg_masks.append(fg_mask)
-
-        # List[B, 1, M, C] -> Tensor[B, M, C] -> Tensor[BM, C]
-        fg_masks = torch.cat(fg_masks, 0).view(-1)                                    # [BM,]
-        gt_score_targets = torch.cat(gt_score_targets, 0).view(-1, self.num_classes)  # [BM, C]
-        gt_bbox_targets = torch.cat(gt_bbox_targets, 0).view(-1, 4)                   # [BM, 4]
-        num_fgs = gt_score_targets.sum()
-        
-        # Average loss normalizer across all the GPUs
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # ------------------ Classification loss ------------------
-        cls_preds = cls_preds.view(-1, self.num_classes)
-        loss_cls = self.loss_classes(cls_preds, gt_score_targets)
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ Regression loss ------------------
-        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
-        box_targets_pos = gt_bbox_targets.view(-1, 4)[fg_masks]
-        bbox_weight = gt_score_targets[fg_masks].sum(-1)
-        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos, bbox_weight)
-        loss_box = loss_box.sum() / num_fgs
-
-        # ------------------ Distribution focal loss  ------------------
-        ## process anchors
-        anchors = torch.cat(outputs['anchors'], dim=0)
-        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
-        ## process stride tensors
-        strides = torch.cat(outputs['stride_tensor'], dim=0)
-        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
-        ## fg preds
-        reg_preds_pos = reg_preds.view(-1, 4*self.reg_max)[fg_masks]
-        anchors_pos = anchors[fg_masks]
-        strides_pos = strides[fg_masks]
-        ## compute dfl
-        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets_pos, anchors_pos, strides_pos, bbox_weight)
-        loss_dfl = loss_dfl.sum() / num_fgs
-
-        # total loss
-        losses = loss_cls * self.loss_cls_weight + loss_box * self.loss_box_weight + loss_dfl * self.loss_dfl_weight
-        loss_dict = dict(
-                loss_cls = loss_cls,
-                loss_box = loss_box,
-                loss_dfl = loss_dfl,
-                losses = losses
-        )
-
-        return loss_dict
-    
-    def compute_seg_loss(self, outputs, targets):
-        """
-            Input:
-                outputs: (Dict) -> {
-                    'pred_cls': (List[torch.Tensor] -> [B, M, Nc]),
-                    'pred_reg': (List[torch.Tensor] -> [B, M, 4]),
-                    'pred_box': (List[torch.Tensor] -> [B, M, 4]),
-                    'strides':  (List[Int])
-                }
-                target: (List[Dict]) [
-                    {'boxes':  (torch.Tensor) -> [N, 4], 
-                     'labels': (torch.Tensor) -> [N,],
-                     ...}, ...
-                     ]
-            Output:
-                loss_dict: (Dict) -> {
-                    'loss_cls': (torch.Tensor) scalar,
-                    'loss_box': (torch.Tensor) scalar,
-                    'loss_box_aux': (torch.Tensor) scalar,
-                    'losses':  (torch.Tensor) scalar,
-                }
-        """
-
-    def compute_pos_loss(self, outputs, targets):
-        """
-            Input:
-                outputs: (Dict) -> {
-                    'pred_cls': (List[torch.Tensor] -> [B, M, Nc]),
-                    'pred_reg': (List[torch.Tensor] -> [B, M, 4]),
-                    'pred_box': (List[torch.Tensor] -> [B, M, 4]),
-                    'strides':  (List[Int])
-                }
-                target: (List[Dict]) [
-                    {'boxes':  (torch.Tensor) -> [N, 4], 
-                     'labels': (torch.Tensor) -> [N,],
-                     ...}, ...
-                     ]
-            Output:
-                loss_dict: (Dict) -> {
-                    'loss_cls': (torch.Tensor) scalar,
-                    'loss_box': (torch.Tensor) scalar,
-                    'loss_box_aux': (torch.Tensor) scalar,
-                    'losses':  (torch.Tensor) scalar,
-                }
-        """
-
-    def __call__(self, outputs, targets, epoch=0, task='det'):
-        # -------------- Detection loss --------------
-        det_loss_dict = None
-        if outputs['det_outputs'] is not None:
-            det_loss_dict = self.compute_det_loss(outputs['det_outputs'], targets)
-        # -------------- Segmentation loss --------------
-        seg_loss_dict = None
-        if outputs['seg_outputs'] is not None:
-            seg_loss_dict = self.compute_seg_loss(outputs['seg_outputs'], targets)
-        # -------------- Human pose loss --------------
-        pos_loss_dict = None
-        if outputs['pos_outputs'] is not None:
-            pos_loss_dict = self.compute_pos_loss(outputs['pos_outputs'], targets)
-
-        # Loss dict
-        if task == 'det':
-            return det_loss_dict
-        
-        if task == 'det_seg':
-            return {'det_loss_dict': det_loss_dict,
-                    'seg_loss_dict': seg_loss_dict}
-        
-        if task == 'det_pos':
-            return {'det_loss_dict': det_loss_dict,
-                    'pos_loss_dict': pos_loss_dict}
-        
-        if task == 'det_seg_pos':
-            return {'det_loss_dict': det_loss_dict,
-                    'seg_loss_dict': seg_loss_dict,
-                    'pos_loss_dict': pos_loss_dict}
-
-
-def build_criterion(cfg, device, num_classes):
-    criterion = Criterion(cfg=cfg, device=device, num_classes=num_classes)
-
-    return criterion
-
-
-
-if __name__ == "__main__":
-    pass

+ 0 - 199
models/detectors/rtcdet/matcher.py

@@ -1,199 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from utils.box_ops import bbox_iou
-
-
-# -------------------------- Task Aligned Assigner --------------------------
-class TaskAlignedAssigner(nn.Module):
-    def __init__(self,
-                 num_classes     = 80,
-                 topk_candidates = 10,
-                 alpha           = 0.5,
-                 beta            = 6.0, 
-                 eps             = 1e-9):
-        super(TaskAlignedAssigner, self).__init__()
-        self.topk_candidates = topk_candidates
-        self.num_classes = num_classes
-        self.bg_idx = num_classes
-        self.alpha = alpha
-        self.beta = beta
-        self.eps = eps
-
-    @torch.no_grad()
-    def forward(self,
-                pd_scores,
-                pd_bboxes,
-                anc_points,
-                gt_labels,
-                gt_bboxes):
-        self.bs = pd_scores.size(0)
-        self.n_max_boxes = gt_bboxes.size(1)
-
-        mask_pos, align_metric, overlaps = self.get_pos_mask(
-            pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points)
-
-        target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(
-            mask_pos, overlaps, self.n_max_boxes)
-
-        # Assigned target
-        target_labels, target_bboxes, target_scores = self.get_targets(
-            gt_labels, gt_bboxes, target_gt_idx, fg_mask)
-
-        # normalize
-        align_metric *= mask_pos
-        pos_align_metrics = align_metric.amax(axis=-1, keepdim=True)  # b, max_num_obj
-        pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True)  # b, max_num_obj
-        norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1)
-        target_scores = target_scores * norm_align_metric
-
-        return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx
-
-    def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points):
-        # get in_gts mask, (b, max_num_obj, h*w)
-        mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
-        # get anchor_align metric, (b, max_num_obj, h*w)
-        align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts)
-        # get topk_metric mask, (b, max_num_obj, h*w)
-        mask_topk = self.select_topk_candidates(align_metric)
-        # merge all mask to a final mask, (b, max_num_obj, h*w)
-        mask_pos = mask_topk * mask_in_gts
-
-        return mask_pos, align_metric, overlaps
-
-    def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts):
-        """Compute alignment metric given predicted and ground truth bounding boxes."""
-        na = pd_bboxes.shape[-2]
-        mask_in_gts = mask_in_gts.bool()  # b, max_num_obj, h*w
-        overlaps = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_bboxes.dtype, device=pd_bboxes.device)
-        bbox_scores = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_scores.dtype, device=pd_scores.device)
-
-        ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long)  # 2, b, max_num_obj
-        ind[0] = torch.arange(end=self.bs).view(-1, 1).expand(-1, self.n_max_boxes)  # b, max_num_obj
-        ind[1] = gt_labels.squeeze(-1)  # b, max_num_obj
-        # Get the scores of each grid for each gt cls
-        bbox_scores[mask_in_gts] = pd_scores[ind[0], :, ind[1]][mask_in_gts]  # b, max_num_obj, h*w
-
-        # (b, max_num_obj, 1, 4), (b, 1, h*w, 4)
-        pd_boxes = pd_bboxes.unsqueeze(1).expand(-1, self.n_max_boxes, -1, -1)[mask_in_gts]
-        gt_boxes = gt_bboxes.unsqueeze(2).expand(-1, -1, na, -1)[mask_in_gts]
-        overlaps[mask_in_gts] = bbox_iou(gt_boxes, pd_boxes, xywh=False, CIoU=True).squeeze(-1).clamp_(0)
-
-        align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)
-        return align_metric, overlaps
-
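The alignment metric computed in `get_box_metrics` follows the task-aligned assignment formulation: for each anchor whose center lies inside a ground-truth box,

$$t = s^{\alpha} \cdot u^{\beta},$$

where $s$ is the predicted score for the gt class, $u$ is the CIoU between the predicted and gt box, and $\alpha = 0.5$, $\beta = 6.0$ come from `matcher_hpy`. The `topk_candidates` anchors with the highest $t$ per gt become positive candidates.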
-    def select_topk_candidates(self, metrics, largest=True):
-        """
-        Args:
-            metrics: (b, max_num_obj, h*w).
-            topk_mask: (b, max_num_obj, topk) or None
-        """
-        # (b, max_num_obj, topk)
-        topk_metrics, topk_idxs = torch.topk(metrics, self.topk_candidates, dim=-1, largest=largest)
-        topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).expand_as(topk_idxs)
-        # (b, max_num_obj, topk)
-        topk_idxs.masked_fill_(~topk_mask, 0)
-
-        # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w)
-        count_tensor = torch.zeros(metrics.shape, dtype=torch.int8, device=topk_idxs.device)
-        ones = torch.ones_like(topk_idxs[:, :, :1], dtype=torch.int8, device=topk_idxs.device)
-        for k in range(self.topk_candidates):
-            # Expand topk_idxs for each value of k and add 1 at the specified positions
-            count_tensor.scatter_add_(-1, topk_idxs[:, :, k:k + 1], ones)
-        # count_tensor.scatter_add_(-1, topk_idxs, torch.ones_like(topk_idxs, dtype=torch.int8, device=topk_idxs.device))
-        # Filter invalid bboxes
-        count_tensor.masked_fill_(count_tensor > 1, 0)
-
-        return count_tensor.to(metrics.dtype)
-
-    def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
-        # Assigned target labels, (b, 1)
-        batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
-        target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes  # (b, h*w)
-        target_labels = gt_labels.long().flatten()[target_gt_idx]  # (b, h*w)
-
-        # Assigned target boxes, (b, max_num_obj, 4) -> (b, h*w, 4)
-        target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx]
-
-        # Assigned target scores
-        target_labels.clamp_(0)
-
-        # 10x faster than F.one_hot()
-        target_scores = torch.zeros((target_labels.shape[0], target_labels.shape[1], self.num_classes),
-                                    dtype=torch.int64,
-                                    device=target_labels.device)  # (b, h*w, 80)
-        target_scores.scatter_(2, target_labels.unsqueeze(-1), 1)
-
-        fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes)  # (b, h*w, 80)
-        target_scores = torch.where(fg_scores_mask > 0, target_scores, 0)
-
-        return target_labels, target_bboxes, target_scores
-    
-
-# -------------------------- Basic Functions --------------------------
-def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
-    """select the positive anchors's center in gt
-    Args:
-        xy_centers (Tensor): shape(bs*n_max_boxes, num_total_anchors, 4)
-        gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    n_anchors = xy_centers.size(0)
-    bs, n_max_boxes, _ = gt_bboxes.size()
-    _gt_bboxes = gt_bboxes.reshape([-1, 4])
-    xy_centers = xy_centers.unsqueeze(0).repeat(bs * n_max_boxes, 1, 1)
-    gt_bboxes_lt = _gt_bboxes[:, 0:2].unsqueeze(1).repeat(1, n_anchors, 1)
-    gt_bboxes_rb = _gt_bboxes[:, 2:4].unsqueeze(1).repeat(1, n_anchors, 1)
-    b_lt = xy_centers - gt_bboxes_lt
-    b_rb = gt_bboxes_rb - xy_centers
-    bbox_deltas = torch.cat([b_lt, b_rb], dim=-1)
-    bbox_deltas = bbox_deltas.reshape([bs, n_max_boxes, n_anchors, -1])
-    return (bbox_deltas.min(axis=-1)[0] > eps).to(gt_bboxes.dtype)
-
-def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
-    """if an anchor box is assigned to multiple gts,
-        the one with the highest iou will be selected.
-    Args:
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-        overlaps (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    Return:
-        target_gt_idx (Tensor): shape(bs, num_total_anchors)
-        fg_mask (Tensor): shape(bs, num_total_anchors)
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    fg_mask = mask_pos.sum(-2)
-    if fg_mask.max() > 1:  # one anchor is assigned to multiple gt_bboxes
-        mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1)  # (b, n_max_boxes, h*w)
-        max_overlaps_idx = overlaps.argmax(1)  # (b, h*w)
-
-        is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device)
-        is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1)
-
-        mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float()  # (b, n_max_boxes, h*w)
-        fg_mask = mask_pos.sum(-2)
-    # Find each grid serve which gt(index)
-    target_gt_idx = mask_pos.argmax(-2)  # (b, h*w)
-
-    return target_gt_idx, fg_mask, mask_pos
-
-def iou_calculator(box1, box2, eps=1e-9):
-    """Calculate iou for batch
-    Args:
-        box1 (Tensor): shape(bs, n_max_boxes, 1, 4)
-        box2 (Tensor): shape(bs, 1, num_total_anchors, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    box1 = box1.unsqueeze(2)  # [N, M1, 4] -> [N, M1, 1, 4]
-    box2 = box2.unsqueeze(1)  # [N, M2, 4] -> [N, 1, M2, 4]
-    px1y1, px2y2 = box1[:, :, :, 0:2], box1[:, :, :, 2:4]
-    gx1y1, gx2y2 = box2[:, :, :, 0:2], box2[:, :, :, 2:4]
-    x1y1 = torch.maximum(px1y1, gx1y1)
-    x2y2 = torch.minimum(px2y2, gx2y2)
-    overlap = (x2y2 - x1y1).clip(0).prod(-1)
-    area1 = (px2y2 - px1y1).clip(0).prod(-1)
-    area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
-    union = area1 + area2 - overlap + eps
-
-    return overlap / union

+ 0 - 246
models/detectors/rtcdet/rtcdet.py

@@ -1,246 +0,0 @@
-# Real-time Convolutional Object Detector
-
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-from .rtcdet_backbone import build_backbone
-from .rtcdet_neck import build_neck
-from .rtcdet_pafpn import build_fpn
-from .rtcdet_head import build_det_head, build_seg_head, build_pose_head
-from .rtcdet_pred import build_det_pred, build_seg_pred, build_pose_pred
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# Real-time Convolutional General Object Detector
-class RTCDet(nn.Module):
-    def __init__(self,
-                 cfg,
-                 device,
-                 num_classes = 20,
-                 conf_thresh = 0.01,
-                 nms_thresh  = 0.5,
-                 topk        = 1000,
-                 trainable   = False,
-                 deploy      = False,
-                 no_multi_labels = False,
-                 nms_class_agnostic = False,
-                 ):
-        super(RTCDet, self).__init__()
-        # ---------------- Basic settings ----------------
-        ## Basic parameters
-        self.cfg = cfg
-        self.device = device
-        self.deploy = deploy
-        self.trainable = trainable
-        self.num_classes = num_classes
-        ## Network parameters
-        self.strides = cfg['stride']
-        self.reg_max = cfg['det_head']['reg_max']
-        self.num_levels = len(self.strides)
-        ## Post-process parameters
-        self.nms_thresh = nms_thresh
-        self.conf_thresh = conf_thresh
-        self.topk_candidates = topk
-        self.no_multi_labels = no_multi_labels
-        self.nms_class_agnostic = nms_class_agnostic
-        
-        # ---------------- Network settings ----------------
-        ## Backbone
-        self.backbone, self.fpn_feat_dims = build_backbone(cfg, pretrained=cfg['bk_pretrained'] and trainable)
-
-        ## Neck: SPP
-        self.neck = build_neck(cfg, self.fpn_feat_dims[-1], self.fpn_feat_dims[-1])
-        self.fpn_feat_dims[-1] = self.neck.out_dim
-        
-        ## Neck: FPN
-        self.fpn = build_fpn(cfg, self.fpn_feat_dims)
-        self.fpn_dims = self.fpn.out_dim
-        self.cls_head_dim = max(self.fpn_dims[0], min(num_classes, 100))
-        self.reg_head_dim = max(self.fpn_dims[0]//4, 16, 4*self.reg_max)
-
-        ## Head: Detection
-        self.det_head = nn.Sequential(
-            build_det_head(cfg          = cfg['det_head'],
-                           in_dims      = self.fpn_dims,
-                           cls_head_dim = self.cls_head_dim,
-                           reg_head_dim = self.reg_head_dim,
-                           num_levels   = self.num_levels
-                           ),
-            build_det_pred(cls_dim     = self.cls_head_dim,
-                           reg_dim     = self.reg_head_dim,
-                           strides     = self.strides,
-                           num_classes = num_classes,
-                           num_coords  = 4,
-                           reg_max     = self.reg_max,
-                           num_levels  = self.num_levels
-                           )
-        )
-
-        ## Head: Segmentation
-        self.seg_head = nn.Sequential(
-            build_seg_head(cfg['seg_head']),
-            build_seg_pred()
-        ) if cfg['seg_head']['name'] is not None else None
-
-        ## Head: Human-pose
-        self.pos_head = nn.Sequential(
-            build_pose_head(cfg['pos_head']),
-            build_pose_pred()
-        ) if cfg['pos_head']['name'] is not None else None
-
-    # Post process
-    def post_process(self, cls_preds, box_preds):
-        """
-        Input:
-            cls_preds: List[np.array] -> [[M, C], ...]
-            box_preds: List[np.array] -> [[M, 4], ...]
-        Output:
-            bboxes: np.array -> [N, 4]
-            scores: np.array -> [N,]
-            labels: np.array -> [N,]
-        """
-        assert len(cls_preds) == self.num_levels
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            if self.no_multi_labels:
-                # [M,]
-                scores, labels = torch.max(cls_pred_i.sigmoid(), dim=1)
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # topk candidates
-                predicted_prob, topk_idxs = scores.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                labels = labels[topk_idxs]
-                bboxes = box_pred_i[topk_idxs]
-            else:
-                # [M, C] -> [MC,]
-                scores_i = cls_pred_i.sigmoid().flatten()
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # torch.sort is actually faster than .topk (at least on GPUs)
-                predicted_prob, topk_idxs = scores_i.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-                labels = topk_idxs % self.num_classes
-
-                bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores, dim=0)
-        labels = torch.cat(all_labels, dim=0)
-        bboxes = torch.cat(all_bboxes, dim=0)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes, self.nms_class_agnostic)
-
-        return bboxes, scores, labels
-    
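In the multi-label branch above, the `[M, C]` class scores are flattened and the anchor index and class label are recovered from the flat top-k indices. A toy illustration of that decode (the shapes are arbitrary, for illustration only):

```python
import torch

M, C = 4, 3                                      # anchors, classes
scores = torch.rand(M, C).flatten()              # [M * C]
topk_scores, topk_idxs = scores.topk(5)
anchor_idxs = torch.div(topk_idxs, C, rounding_mode='floor')  # which anchor
labels = topk_idxs % C                                        # which class
```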
-    # Main process
-    def forward(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Head ----------------
-        det_outputs = self.forward_det_head(pyramid_feats)
-        seg_outputs = self.forward_seg_head(pyramid_feats)
-        pos_outputs = self.forward_pos_head(pyramid_feats)
-
-        if not self.trainable:
-            # At inference time, merge all task outputs into a single dict.
-            if seg_outputs is not None:
-                det_outputs.update(seg_outputs)
-            if pos_outputs is not None:
-                det_outputs.update(pos_outputs)
-            outputs = det_outputs
-        else:
-            outputs = {
-                'det_outputs': det_outputs,
-                'seg_outputs': seg_outputs,
-                'pos_outputs': pos_outputs
-            }
-
-        return outputs
-
-    def forward_det_head(self, x):
-        # ---------------- Heads ----------------
-        outputs = self.det_head(x)
-
-        # ---------------- Post-process ----------------
-        if not self.trainable:
-            all_cls_preds = outputs['pred_cls']
-            all_box_preds = outputs['pred_box']
-
-            if self.deploy:
-                cls_preds = torch.cat(all_cls_preds, dim=1)[0]
-                box_preds = torch.cat(all_box_preds, dim=1)[0]
-                scores = cls_preds.sigmoid()
-                bboxes = box_preds
-                # [n_anchors_all, 4 + C]
-                outputs = torch.cat([bboxes, scores], dim=-1)
-
-            else:
-                # post process
-                bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
-
-                outputs = {
-                    "scores": scores,
-                    "labels": labels,
-                    "bboxes": bboxes
-                }
-            
-        return outputs
-
-    def forward_seg_head(self, x):
-        if self.seg_head is None:
-            return None
-    
-    def forward_pos_head(self, x):
-        if self.pos_head is None:
-            return None

+ 0 - 184
models/detectors/rtcdet/rtcdet_backbone.py

@@ -1,184 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .rtcdet_basic import Conv, RTCBlock
-except:
-    from rtcdet_basic import Conv, RTCBlock
-
-
-# IN-1k MIM pretrained
-model_urls = {
-    "rtcnet_n": None,
-    "rtcnet_s": None,
-    "rtcnet_m": None,
-    "rtcnet_l": None,
-    "rtcnet_x": None,
-}
-
-
-# ---------------------------- Basic functions ----------------------------
-## Real-time Convolutional Backbone
-class RTCBackbone(nn.Module):
-    def __init__(self, width=1.0, depth=1.0, ratio=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(RTCBackbone, self).__init__()
-        # ---------------- Basic parameters ----------------
-        self.width_factor = width
-        self.depth_factor = depth
-        self.last_stage_factor = ratio
-        self.feat_dims = [round(64 * width), round(128 * width), round(256 * width), round(512 * width), round(512 * width * ratio)]
-        # ---------------- Network parameters ----------------
-        ## P1/2
-        self.layer_1 = Conv(3, self.feat_dims[0], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type)
-        ## P2/4
-        self.layer_2 = nn.Sequential(
-            Conv(self.feat_dims[0], self.feat_dims[1], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            RTCBlock(in_dim     = self.feat_dims[1],
-                     out_dim    = self.feat_dims[1],
-                     num_blocks = round(3*depth),
-                     shortcut   = True,
-                     act_type   = act_type,
-                     norm_type  = norm_type,
-                     depthwise  = depthwise)
-        )
-        ## P3/8
-        self.layer_3 = nn.Sequential(
-            Conv(self.feat_dims[1], self.feat_dims[2], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            RTCBlock(in_dim     = self.feat_dims[2],
-                     out_dim    = self.feat_dims[2],
-                     num_blocks = round(6*depth),
-                     shortcut   = True,
-                     act_type   = act_type,
-                     norm_type  = norm_type,
-                     depthwise  = depthwise)
-        )
-        ## P4/16
-        self.layer_4 = nn.Sequential(
-            Conv(self.feat_dims[2], self.feat_dims[3], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            RTCBlock(in_dim     = self.feat_dims[3],
-                     out_dim    = self.feat_dims[3],
-                     num_blocks = round(6*depth),
-                     shortcut   = True,
-                     act_type   = act_type,
-                     norm_type  = norm_type,
-                     depthwise  = depthwise)
-        )
-        ## P5/32
-        self.layer_5 = nn.Sequential(
-            Conv(self.feat_dims[3], self.feat_dims[4], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            RTCBlock(in_dim     = self.feat_dims[4],
-                     out_dim    = self.feat_dims[4],
-                     num_blocks = round(3*depth),
-                     shortcut   = True,
-                     act_type   = act_type,
-                     norm_type  = norm_type,
-                     depthwise  = depthwise)
-        )
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
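As a concrete example of the width/depth/ratio scaling used by `RTCBackbone`, the nano configuration (width=0.25, depth=0.34, ratio=2.0) works out to the following channel widths and per-stage block counts:

```python
width, depth, ratio = 0.25, 0.34, 2.0   # rtcnet_n
feat_dims = [round(64 * width), round(128 * width), round(256 * width),
             round(512 * width), round(512 * width * ratio)]
print(feat_dims)                              # [16, 32, 64, 128, 256]
print(round(3 * depth), round(6 * depth))     # 1 RTCBlock unit in P2/P5, 2 in P3/P4
```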
-# ---------------------------- Functions ----------------------------
-## Build Backbone network
-def build_backbone(cfg, pretrained=False): 
-    # build backbone model
-    backbone = RTCBackbone(width=cfg['width'],
-                           depth=cfg['depth'],
-                           ratio=cfg['ratio'],
-                           act_type=cfg['bk_act'],
-                           norm_type=cfg['bk_norm'],
-                           depthwise=cfg['bk_depthwise']
-                           )
-    feat_dims = backbone.feat_dims[-3:]
-
-    # Model name
-    width, depth, ratio = cfg['width'], cfg['depth'], cfg['ratio']
-    if  width == 0.25   and depth == 0.34 and ratio == 2.0:
-        model_name = "rtcnet_n"
-    elif width == 0.50  and depth == 0.34 and ratio == 2.0:
-        model_name = "rtcnet_s"
-    elif width == 0.75  and depth == 0.67 and ratio == 1.5:
-        model_name = "rtcnet_m"
-    elif width == 1.0   and depth == 1.0  and ratio == 1.0:
-        model_name = "rtcnet_l"
-    elif width == 1.25  and depth == 1.34  and ratio == 1.0:
-        model_name = "rtcnet_x"
-    else:
-        raise NotImplementedError("No such model size : width={}, depth={}, ratio={}. ".format(width, depth, ratio))
-
-    # Load pretrained weight
-    if pretrained:
-        backbone = load_pretrained_weight(backbone, model_name)
-        
-    return backbone, feat_dims
-
-## Load pretrained weight
-def load_pretrained_weight(model, model_name):
-    # Load pretrained weight
-    url = model_urls[model_name]
-    if url is not None:
-        print('Loading pretrained weight ...')
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print('Unused key: ', k)
-        # load the weight
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No backbone pretrained for {}.'.format(model_name))
-
-    return model
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'bk_pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'ratio': 2.0,
-    }
-    model, feats = build_backbone(cfg, pretrained=cfg['bk_pretrained'])
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    x = torch.randn(1, 3, 640, 640)
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))
-
-
-    for n, p in model.named_parameters():
-        print(n)
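
For reference, the width/depth/ratio multipliers of the removed backbone map to channel widths and block counts as sketched below. This is an illustrative, standalone recomputation: the base channels (64/128/256/512) and base depths (3/6/6/3) mirror the deleted RTCBackbone above, and the scale table simply lists the presets named in build_backbone().

```python
# Standalone sketch: how width/depth/ratio scale the removed RTCBackbone.
SCALES = {
    "rtcnet_n": dict(width=0.25, depth=0.34, ratio=2.0),
    "rtcnet_s": dict(width=0.50, depth=0.34, ratio=2.0),
    "rtcnet_m": dict(width=0.75, depth=0.67, ratio=1.5),
    "rtcnet_l": dict(width=1.00, depth=1.00, ratio=1.0),
    "rtcnet_x": dict(width=1.25, depth=1.34, ratio=1.0),
}

def scaled_dims(width, ratio):
    """Stage channels P1..P5, as computed by RTCBackbone.feat_dims."""
    return [round(64 * width), round(128 * width), round(256 * width),
            round(512 * width), round(512 * width * ratio)]

def scaled_depths(depth):
    """Bottlenecks per RTCBlock for stages P2..P5 (base depths 3, 6, 6, 3)."""
    return [round(n * depth) for n in (3, 6, 6, 3)]

for name, s in SCALES.items():
    print(name, scaled_dims(s["width"], s["ratio"]), scaled_depths(s["depth"]))
# e.g. rtcnet_n -> channels [16, 32, 64, 128, 256], depths [1, 2, 2, 1]
```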

+ 0 - 137
models/detectors/rtcdet/rtcdet_basic.py

@@ -1,137 +0,0 @@
-import torch
-import torch.nn as nn
-
-
-# --------------------- Basic modules ---------------------
-class SiLU(nn.Module):
-    """export-friendly version of nn.SiLU()"""
-
-    @staticmethod
-    def forward(x):
-        return x * torch.sigmoid(x)
-
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-        
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-    elif norm_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-class Conv(nn.Module):
-    def __init__(self, 
-                 c1,                   # in channels
-                 c2,                   # out channels 
-                 k=1,                  # kernel size 
-                 p=0,                  # padding
-                 s=1,                  # stride
-                 d=1,                  # dilation
-                 act_type='lrelu',     # activation
-                 norm_type='BN',       # normalization
-                 depthwise=False):
-        super(Conv, self).__init__()
-        convs = []
-        add_bias = False if norm_type else True
-        if depthwise:
-            # depthwise conv
-            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c1))
-            if act_type:
-                convs.append(get_activation(act_type))
-            # pointwise conv
-            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-
-        else:
-            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-            
-        self.convs = nn.Sequential(*convs)
-
-
-    def forward(self, x):
-        return self.convs(x)
-
-
-# --------------------- Yolov8 modules ---------------------
-## Yolov8-style BottleNeck
-class Bottleneck(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expand_ratio = 0.5,
-                 kernel_sizes = [3, 3],
-                 shortcut     = True,
-                 act_type     = 'silu',
-                 norm_type    = 'BN',
-                 depthwise    = False,):
-        super(Bottleneck, self).__init__()
-        inter_dim = int(out_dim * expand_ratio)  # hidden channels            
-        self.cv1 = Conv(in_dim, inter_dim, k=kernel_sizes[0], p=kernel_sizes[0]//2, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
-        self.cv2 = Conv(inter_dim, out_dim, k=kernel_sizes[1], p=kernel_sizes[1]//2, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
-        self.shortcut = shortcut and in_dim == out_dim
-
-    def forward(self, x):
-        h = self.cv2(self.cv1(x))
-
-        return x + h if self.shortcut else h
-
-# Yolov8-style StageBlock
-class RTCBlock(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 num_blocks = 1,
-                 shortcut   = False,
-                 act_type   = 'silu',
-                 norm_type  = 'BN',
-                 depthwise  = False,):
-        super(RTCBlock, self).__init__()
-        self.inter_dim = out_dim // 2
-        self.input_proj = Conv(in_dim, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-        self.m = nn.Sequential(*(
-            Bottleneck(self.inter_dim, self.inter_dim, 1.0, [3, 3], shortcut, act_type, norm_type, depthwise)
-            for _ in range(num_blocks)))
-        self.output_proj = Conv((2 + num_blocks) * self.inter_dim, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-    def forward(self, x):
-        # Input proj
-        x1, x2 = torch.chunk(self.input_proj(x), 2, dim=1)
-        out = list([x1, x2])
-
-        # Bottleneck
-        out.extend(m(out[-1]) for m in self.m)
-
-        # Output proj
-        out = self.output_proj(torch.cat(out, dim=1))
-
-        return out
-    
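
As a rough shape check, the CSP-style aggregation in RTCBlock can be traced with plain convolutions: the 1x1 input projection is split into two halves, each bottleneck feeds on the previous output, and all (2 + num_blocks) chunks are concatenated before the 1x1 output projection. The sketch below replaces the Conv wrapper with bare nn.Conv2d layers (norm/activation omitted) and uses illustrative dimensions.

```python
import torch
import torch.nn as nn

# Channel bookkeeping of an RTCBlock-style CSP stage (illustrative dims).
in_dim, out_dim, num_blocks = 128, 128, 2
inter_dim = out_dim // 2

input_proj  = nn.Conv2d(in_dim, out_dim, 1)
bottlenecks = nn.ModuleList([nn.Conv2d(inter_dim, inter_dim, 3, padding=1)
                             for _ in range(num_blocks)])
output_proj = nn.Conv2d((2 + num_blocks) * inter_dim, out_dim, 1)

x = torch.randn(1, in_dim, 40, 40)
x1, x2 = torch.chunk(input_proj(x), 2, dim=1)   # two halves of inter_dim channels
feats = [x1, x2]
for m in bottlenecks:                           # each block consumes the last output
    feats.append(m(feats[-1]))
y = output_proj(torch.cat(feats, dim=1))        # (2 + num_blocks) * inter_dim -> out_dim
assert y.shape == (1, out_dim, 40, 40)
```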

+ 0 - 191
models/detectors/rtcdet/rtcdet_head.py

@@ -1,191 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .rtcdet_basic import Conv
-except:
-    from rtcdet_basic import Conv
-
-
-def build_det_head(cfg, in_dims, cls_head_dim, reg_head_dim, num_levels=3):
-    head = MDetHead(cfg, in_dims, cls_head_dim, reg_head_dim, num_levels)
-
-    return head
-
-def build_seg_head(cfg, in_dims, out_dim):
-    return MaskHead()
-
-def build_pose_head(cfg, in_dims, out_dim):
-    return PoseHead()
-
-
-# ---------------------------- Detection Head ----------------------------
-## Single-level Detection Head
-class SDetHead(nn.Module):
-    def __init__(self,
-                 in_dim       :int  = 256,
-                 cls_head_dim :int  = 256,
-                 reg_head_dim :int  = 256,
-                 num_cls_head :int  = 2,
-                 num_reg_head :int  = 2,
-                 act_type     :str  = "silu",
-                 norm_type    :str  = "BN",
-                 depthwise    :bool = False):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.depthwise = depthwise
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = cls_head_dim
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    Conv(in_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-            else:
-                cls_feats.append(
-                    Conv(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                        act_type=act_type,
-                        norm_type=norm_type,
-                        depthwise=depthwise)
-                        )      
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = reg_head_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    Conv(in_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-            else:
-                reg_feats.append(
-                    Conv(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-## Multi-level Detection Head
-class MDetHead(nn.Module):
-    def __init__(self, cfg, in_dims, cls_head_dim, reg_head_dim, num_levels=3):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [SDetHead(in_dim       = in_dims[level],
-                      cls_head_dim = cls_head_dim,
-                      reg_head_dim = reg_head_dim,
-                      num_cls_head = cfg['num_cls_head'],
-                      num_reg_head = cfg['num_reg_head'],
-                      act_type     = cfg['head_act'],
-                      norm_type    = cfg['head_norm'],
-                      depthwise    = cfg['head_depthwise'])
-                      for level in range(num_levels)
-                      ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
-        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        outputs = {
-            "cls_feat": cls_feats,
-            "reg_feat": reg_feats
-        }
-
-        return outputs
-
-
-# ---------------------------- Segmentation Head ----------------------------
-class MaskHead(nn.Module):
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-
-    def forward(self, x):
-        return
-
-
-# ---------------------------- Human-Pose Head ----------------------------
-class PoseHead(nn.Module):
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-
-    def forward(self, x):
-        return
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'head': 'decoupled_head',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'head_depthwise': False,
-        'reg_max': 16,
-    }
-    fpn_dims = [256, 256, 256]
-    out_dim = 256
-    # Head-1
-    model = build_det_head(cfg, fpn_dims, out_dim, out_dim, num_levels=3)
-    print(model)
-    fpn_feats = [torch.randn(1, fpn_dims[0], 80, 80), torch.randn(1, fpn_dims[1], 40, 40), torch.randn(1, fpn_dims[2], 20, 20)]
-    t0 = time.time()
-    outputs = model(fpn_feats)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(fpn_feats, ), verbose=False)
-    print('==============================')
-    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Head-1: Params : {:.2f} M'.format(params / 1e6))
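
The head above is a standard decoupled design: per FPN level, two independent 3x3-conv stacks produce separate classification and regression features. Below is a toy, self-contained sketch of that idea (not the removed MDetHead itself; the channel sizes and two-conv depth are purely illustrative).

```python
import torch
import torch.nn as nn

def make_branch(in_dim, head_dim, num_convs):
    """One branch of a decoupled head: a small stack of 3x3 conv + BN + SiLU."""
    layers = []
    for i in range(num_convs):
        layers += [nn.Conv2d(in_dim if i == 0 else head_dim, head_dim, 3, padding=1),
                   nn.BatchNorm2d(head_dim),
                   nn.SiLU(inplace=True)]
    return nn.Sequential(*layers)

cls_branch = make_branch(256, 256, num_convs=2)
reg_branch = make_branch(256, 256, num_convs=2)

feat = torch.randn(1, 256, 80, 80)          # a P3-level feature map
cls_feat, reg_feat = cls_branch(feat), reg_branch(feat)
print(cls_feat.shape, reg_feat.shape)       # both torch.Size([1, 256, 80, 80])
```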

+ 0 - 108
models/detectors/rtcdet/rtcdet_neck.py

@@ -1,108 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .rtcdet_basic import Conv
-except:
-    from rtcdet_basic import Conv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code references https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio=0.5):
-        super().__init__()
-        # ---------------- Basic Parameters ----------------
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        # ---------------- Network Parameters ----------------
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.MaxPool2d(kernel_size=cfg['pooling_size'], stride=1, padding=cfg['pooling_size'] // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-
-# SPPF block with CSP module
-class SPPFBlockCSP(nn.Module):
-    """
-        CSP Spatial Pyramid Pooling Block
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio):
-        super(SPPFBlockCSP, self).__init__()
-        # ---------------- Basic Parameters ----------------
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        # ---------------- Network Parameters ----------------
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.Sequential(
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise']),
-            SPPF(cfg, inter_dim, inter_dim, expand_ratio=1.0),
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise'])
-        )
-        self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-
-        
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.m(x2)
-        y = self.cv3(torch.cat([x1, x3], dim=1))
-
-        return y
-
-
-def build_neck(cfg, in_dim, out_dim):
-    model = cfg['neck']
-    print('==============================')
-    print('Neck: {}'.format(model))
-    # build neck
-    if model == 'sppf':
-        neck = SPPF(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-    elif model == 'csp_sppf':
-        neck = SPPFBlockCSP(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-
-    return neck
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-    }
-    in_dim = 512
-    out_dim = 512
-    # Head-1
-    model = build_neck(cfg, in_dim, out_dim)
-    feat = torch.randn(1, in_dim, 20, 20)
-    t0 = time.time()
-    outputs = model(feat)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(feat, ), verbose=False)
-    print('==============================')
-    print('FPN: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('FPN: Params : {:.2f} M'.format(params / 1e6))
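
The SPPF trick above relies on the fact that chaining one k=5 max-pool (stride 1, padding 2) two and three times is exactly equivalent to parallel max-pools with kernels 9 and 13, which is why it matches SPP's output while being cheaper. A quick numerical check:

```python
import torch
import torch.nn as nn

x  = torch.randn(1, 8, 20, 20)
m5 = nn.MaxPool2d(5, stride=1, padding=2)
m9, m13 = nn.MaxPool2d(9, stride=1, padding=4), nn.MaxPool2d(13, stride=1, padding=6)

assert torch.equal(m5(m5(x)), m9(x))        # two chained 5x5 pools == one 9x9 pool
assert torch.equal(m5(m5(m5(x))), m13(x))   # three chained 5x5 pools == one 13x13 pool
```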

+ 0 - 155
models/detectors/rtcdet/rtcdet_pafpn.py

@@ -1,155 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-try:
-    from .rtcdet_basic import Conv, RTCBlock
-except:
-    from rtcdet_basic import Conv, RTCBlock
-
-
-# PaFPN of RTCDet
-class RTCDetPaFPN(nn.Module):
-    def __init__(self, 
-                 in_dims   = [256, 512, 512],
-                 width     = 1.0,
-                 depth     = 1.0,
-                 ratio     = 1.0,
-                 act_type  = 'silu',
-                 norm_type = 'BN',
-                 depthwise = False):
-        super(RTCDetPaFPN, self).__init__()
-        print('==============================')
-        print('FPN: {}'.format("RTCDet PaFPN"))
-        # ---------------- Basic parameters ----------------
-        self.in_dims = in_dims
-        self.width = width
-        self.depth = depth
-        self.out_dim = [round(256 * width), round(512 * width), round(512 * width * ratio)]
-        c3, c4, c5 = in_dims
-
-        # ---------------- Top down ----------------
-        ## P5 -> P4
-        self.top_down_layer_1 = RTCBlock(in_dim       = c5 + c4,
-                                         out_dim      = round(512*width),
-                                         num_blocks   = round(3*depth),
-                                         shortcut     = False,
-                                         act_type     = act_type,
-                                         norm_type    = norm_type,
-                                         depthwise    = depthwise,
-                                         )
-        ## P4 -> P3
-        self.top_down_layer_2 = RTCBlock(in_dim       = round(512*width) + c3,
-                                         out_dim      = round(256*width),
-                                         num_blocks   = round(3*depth),
-                                         shortcut     = False,
-                                         act_type     = act_type,
-                                         norm_type    = norm_type,
-                                         depthwise    = depthwise,
-                                         )
-        # ---------------- Bottom up ----------------
-        ## P3 -> P4
-        self.downsample_layer_1 = Conv(round(256*width), round(256*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        self.bottom_up_layer_1  = RTCBlock(in_dim       = round(256*width) + round(512*width),
-                                           out_dim      = round(512*width),
-                                           num_blocks   = round(3*depth),
-                                           shortcut     = False,
-                                           act_type     = act_type,
-                                           norm_type    = norm_type,
-                                           depthwise    = depthwise,
-                                           )
-        ## P4 -> P5
-        self.downsample_layer_2 = Conv(round(512*width), round(512*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        self.bottom_up_layer_2 = RTCBlock(in_dim       = round(512 * width) + c5,
-                                          out_dim      = round(512 * width * ratio),
-                                          num_blocks   = round(3*depth),
-                                          shortcut     = False,
-                                          act_type     = act_type,
-                                          norm_type    = norm_type,
-                                          depthwise    = depthwise,
-                                          )
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, features):
-        c3, c4, c5 = features
-
-        # Top down
-        ## P5 -> P4
-        c6 = F.interpolate(c5, scale_factor=2.0)
-        c7 = torch.cat([c6, c4], dim=1)
-        c8 = self.top_down_layer_1(c7)
-        ## P4 -> P3
-        c9 = F.interpolate(c8, scale_factor=2.0)
-        c10 = torch.cat([c9, c3], dim=1)
-        c11 = self.top_down_layer_2(c10)
-
-        # Bottom up
-        # P3 -> P4
-        c12 = self.downsample_layer_1(c11)
-        c13 = torch.cat([c12, c8], dim=1)
-        c14 = self.bottom_up_layer_1(c13)
-        # P4 -> P5
-        c15 = self.downsample_layer_2(c14)
-        c16 = torch.cat([c15, c5], dim=1)
-        c17 = self.bottom_up_layer_2(c16)
-
-        out_feats = [c11, c14, c17] # [P3, P4, P5]
-        
-        return out_feats
-
-
-def build_fpn(cfg, in_dims):
-    model = cfg['fpn']
-    # build neck
-    if model == 'rtcdet_pafpn':
-        fpn_net = RTCDetPaFPN(in_dims   = in_dims,
-                              width     = cfg['width'],
-                              depth     = cfg['depth'],
-                              ratio     = cfg['ratio'],
-                              act_type  = cfg['fpn_act'],
-                              norm_type = cfg['fpn_norm'],
-                              depthwise = cfg['fpn_depthwise']
-                              )
-    else:
-        raise NotImplementedError
-    
-    return fpn_net
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'fpn': 'rtcdet_pafpn',
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'ratio': 1.0
-    }
-    fpn_dims = [256, 512, 512]
-    out_dim = 256
-    model = build_fpn(cfg, fpn_dims)
-    pyramid_feats = [torch.randn(1, fpn_dims[0], 80, 80), torch.randn(1, fpn_dims[1], 40, 40), torch.randn(1, fpn_dims[2], 20, 20)]
-    t0 = time.time()
-    outputs = model(pyramid_feats)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(pyramid_feats, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))
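
For the record, here is the channel bookkeeping of the removed PaFPN, written out for width=1.0 and ratio=1.0 (so C3/C4/C5 = 256/512/512). It is a purely illustrative trace of the concatenations in the forward pass above.

```python
# Top-down / bottom-up channel trace of the removed RTCDet PaFPN (width=1.0, ratio=1.0).
c3, c4, c5 = 256, 512, 512
w, r = 1.0, 1.0

# Top-down path
td1_in, td1_out = c5 + c4,      round(512 * w)        # upsample(C5) cat C4  -> P4'
td2_in, td2_out = td1_out + c3, round(256 * w)        # upsample(P4') cat C3 -> P3

# Bottom-up path
bu1_in, bu1_out = td2_out + td1_out, round(512 * w)      # down(P3) cat P4' -> P4
bu2_in, bu2_out = bu1_out + c5,      round(512 * w * r)  # down(P4) cat C5  -> P5

print([td2_out, bu1_out, bu2_out])   # output dims [P3, P4, P5] = [256, 512, 512]
```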

+ 0 - 200
models/detectors/rtcdet/rtcdet_pred.py

@@ -1,200 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-def build_det_pred(cls_dim, reg_dim, strides, num_classes, num_coords=4, reg_max=16, num_levels=3):
-    pred_layers = MDetPDLayer(cls_dim     = cls_dim,
-                              reg_dim     = reg_dim,
-                              strides     = strides,
-                              num_classes = num_classes,
-                              num_coords  = num_coords,
-                              num_levels  = num_levels,
-                              reg_max     = reg_max) 
-
-    return pred_layers
-
-def build_seg_pred():
-    return MaskPDLayer()
-
-def build_pose_pred():
-    return PosePDLayer()
-
-
-# ---------------------------- Detection predictor ----------------------------
-## Single-level Detection Prediction Layer
-class SDetPDLayer(nn.Module):
-    def __init__(self,
-                 cls_dim     :int = 256,
-                 reg_dim     :int = 256,
-                 stride      :int = 32,
-                 reg_max     :int = 16,
-                 num_classes :int = 80,
-                 num_coords  :int = 4):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.stride = stride
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.reg_max = reg_max
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-
-        # --------- Network Parameters ----------
-        self.cls_pred = nn.Conv2d(cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(reg_dim, num_coords, kernel_size=1)                
-
-        self.init_bias()
-        
-    def init_bias(self):
-        # cls pred bias
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(math.log(5 / self.num_classes / (640. / self.stride) ** 2))
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred bias
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-    def generate_anchors(self, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        # generate grid cells
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors += 0.5  # add center offset
-        anchors *= self.stride
-
-        return anchors
-        
-    def forward(self, cls_feat, reg_feat):
-        # pred
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        # generate anchor boxes: [M, 4]
-        B, _, H, W = cls_pred.size()
-        fmp_size = [H, W]
-        anchors = self.generate_anchors(fmp_size)
-        anchors = anchors.to(cls_pred.device)
-        # stride tensor: [M, 1]
-        stride_tensor = torch.ones_like(anchors[..., :1]) * self.stride
-        
-        # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
-        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4*self.reg_max)
-        
-        # output dict
-        outputs = {"pred_cls": cls_pred,            # List(Tensor) [B, M, C]
-                   "pred_reg": reg_pred,            # List(Tensor) [B, M, 4*(reg_max)]
-                   "anchors": anchors,              # List(Tensor) [M, 2]
-                   "strides": self.stride,          # List(Int) = [8, 16, 32]
-                   "stride_tensor": stride_tensor   # List(Tensor) [M, 1]
-                   }
-
-        return outputs
-
-## Multi-level pred layer
-class MDetPDLayer(nn.Module):
-    def __init__(self,
-                 cls_dim,
-                 reg_dim,
-                 strides,
-                 num_classes :int = 80,
-                 num_coords  :int = 4,
-                 num_levels  :int = 3,
-                 reg_max     :int = 16):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.strides = strides
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-        self.num_levels = num_levels
-        self.reg_max = reg_max
-
-        # ----------- Network Parameters -----------
-        ## pred layers
-        self.multi_level_preds = nn.ModuleList(
-            [SDetPDLayer(cls_dim     = cls_dim,
-                         reg_dim     = reg_dim,
-                         stride      = strides[level],
-                         reg_max     = reg_max,
-                         num_classes = num_classes,
-                         num_coords  = num_coords * reg_max)
-                         for level in range(num_levels)
-                         ])
-        ## proj conv
-        proj_init = torch.arange(reg_max, dtype=torch.float)
-        self.proj_conv = nn.Conv2d(self.reg_max, 1, kernel_size=1, bias=False).requires_grad_(False)
-        self.proj_conv.weight.data[:] = nn.Parameter(proj_init.view([1, reg_max, 1, 1]))
-
-    def forward(self, inputs):
-        cls_feats, reg_feats = inputs['cls_feat'], inputs['reg_feat']
-        all_anchors = []
-        all_strides = []
-        all_cls_preds = []
-        all_reg_preds = []
-        all_box_preds = []
-        all_delta_preds = []
-        for level in range(self.num_levels):
-            # -------------- Single-level prediction --------------
-            outputs = self.multi_level_preds[level](cls_feats[level], reg_feats[level])
-
-            # -------------- Decode bbox --------------
-            B, M = outputs["pred_reg"].shape[:2]
-            # [B, M, 4*(reg_max)] -> [B, M, 4, reg_max]
-            delta_pred = outputs["pred_reg"].reshape([B, M, 4, self.reg_max])
-            # [B, M, 4, reg_max] -> [B, reg_max, 4, M]
-            delta_pred = delta_pred.permute(0, 3, 2, 1).contiguous()
-            # [B, reg_max, 4, M] -> [B, 1, 4, M]
-            delta_pred = self.proj_conv(F.softmax(delta_pred, dim=1))
-            # [B, 1, 4, M] -> [B, 4, M] -> [B, M, 4]
-            delta_pred = delta_pred.view(B, 4, M).permute(0, 2, 1).contiguous()
-            ## tlbr -> xyxy
-            x1y1_pred = outputs["anchors"][None] - delta_pred[..., :2] * self.strides[level]
-            x2y2_pred = outputs["anchors"][None] + delta_pred[..., 2:] * self.strides[level]
-            box_pred = torch.cat([x1y1_pred, x2y2_pred], dim=-1)
-
-            # collect results
-            all_cls_preds.append(outputs["pred_cls"])
-            all_reg_preds.append(outputs["pred_reg"])
-            all_box_preds.append(box_pred)
-            all_delta_preds.append(delta_pred)
-            all_anchors.append(outputs["anchors"])
-            all_strides.append(outputs["stride_tensor"])
-        
-        # output dict
-        outputs = {"pred_cls": all_cls_preds,        # List(Tensor) [B, M, C]
-                   "pred_reg": all_reg_preds,        # List(Tensor) [B, M, 4*(reg_max)]
-                   "pred_box": all_box_preds,        # List(Tensor) [B, M, 4]
-                   "pred_delta": all_delta_preds,    # List(Tensor) [B, M, 4]
-                   "anchors": all_anchors,           # List(Tensor) [M, 2]
-                   "strides": self.strides,          # List(Int) = [8, 16, 32]
-                   "stride_tensor": all_strides      # List(Tensor) [M, 1]
-                   }
-
-        return outputs
-
-
-# -------------------- Segmentation predictor --------------------
-class MaskPDLayer(nn.Module):
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-    
-    def forward(self, x):
-        return
-
-
-# -------------------- Human-Pose predictor --------------------
-class PosePDLayer(nn.Module):
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-    
-    def forward(self, x):
-        return
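
The box decoding above is the usual DFL scheme: each side offset (left, top, right, bottom) is predicted as a distribution over reg_max bins, collapsed to its expectation (the proj_conv with arange weights is just a weighted sum), scaled by the stride, and applied around the anchor point. Below is a minimal sketch that uses a plain weighted sum in place of the 1x1 projection conv; the anchors and shapes are toy values for illustration only.

```python
import torch
import torch.nn.functional as F

B, M, reg_max, stride = 1, 3, 16, 8
reg_pred = torch.randn(B, M, 4 * reg_max)                  # raw per-side logits
anchors  = torch.tensor([[4., 4.], [12., 4.], [20., 4.]])  # (M, 2) anchor centers in pixels

bins  = torch.arange(reg_max, dtype=torch.float)           # 0 .. reg_max-1
prob  = F.softmax(reg_pred.view(B, M, 4, reg_max), dim=-1) # per-side bin distribution
delta = (prob * bins).sum(-1)                              # (B, M, 4) expected l, t, r, b

x1y1  = anchors[None] - delta[..., :2] * stride
x2y2  = anchors[None] + delta[..., 2:] * stride
boxes = torch.cat([x1y1, x2y2], dim=-1)                    # (B, M, 4) boxes in xyxy
print(boxes.shape)
```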

+ 2 - 2
models/detectors/yolov8/README.md

@@ -17,13 +17,13 @@
 ### Single GPU
 Taking training YOLOv8-S on COCO as the example,
 ```Shell
-python train.py --cuda -d coco --root path/to/coco -m yolov8_s -bs 16 -size 640 --wp_epoch 3 --max_epoch 300 --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --multi_scale 
+python train.py --cuda -d coco --root path/to/coco -m yolov8_s -bs 16 -size 640 --wp_epoch 3 --max_epoch 500 --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --multi_scale 
 ```
 
 ### Multi GPU
 Taking training YOLOv8 on COCO as the example,
 ```Shell
-python -m torch.distributed.run --nproc_per_node=8 train.py --cuda -dist -d coco --root /data/datasets/ -m yolov8_s -bs 128 -size 640 --wp_epoch 3 --max_epoch 300  --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --sybn --multi_scale --save_folder weights/ 
+python -m torch.distributed.run --nproc_per_node=8 train.py --cuda -dist -d coco --root /data/datasets/ -m yolov8_s -bs 128 -size 640 --wp_epoch 3 --max_epoch 500  --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --sybn --multi_scale --save_folder weights/ 
 ```
 
 ## Test YOLOv8

+ 58 - 35
models/detectors/yolov8/matcher.py

@@ -36,7 +36,7 @@ class TaskAlignedAssigner(nn.Module):
         target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(
             mask_pos, overlaps, self.n_max_boxes)
 
-        # assigned target
+        # Assigned target
         target_labels, target_bboxes, target_scores = self.get_targets(
             gt_labels, gt_bboxes, target_gt_idx, fg_mask)
 
@@ -50,28 +50,36 @@ class TaskAlignedAssigner(nn.Module):
         return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx
 
     def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points):
-        # get anchor_align metric, (b, max_num_obj, h*w)
-        align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes)
         # get in_gts mask, (b, max_num_obj, h*w)
         mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
+        # get anchor_align metric, (b, max_num_obj, h*w)
+        align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts)
         # get topk_metric mask, (b, max_num_obj, h*w)
-        mask_topk = self.select_topk_candidates(align_metric * mask_in_gts)
+        mask_topk = self.select_topk_candidates(align_metric)
         # merge all mask to a final mask, (b, max_num_obj, h*w)
         mask_pos = mask_topk * mask_in_gts
 
         return mask_pos, align_metric, overlaps
 
-    def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes):
+    def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_in_gts):
+        """Compute alignment metric given predicted and ground truth bounding boxes."""
+        na = pd_bboxes.shape[-2]
+        mask_in_gts = mask_in_gts.bool()  # b, max_num_obj, h*w
+        overlaps = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_bboxes.dtype, device=pd_bboxes.device)
+        bbox_scores = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_scores.dtype, device=pd_scores.device)
+
         ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long)  # 2, b, max_num_obj
-        ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes)  # b, max_num_obj
-        ind[1] = gt_labels.long().squeeze(-1)  # b, max_num_obj
-        # get the scores of each grid for each gt cls
-        bbox_scores = pd_scores[ind[0], :, ind[1]]  # b, max_num_obj, h*w
+        ind[0] = torch.arange(end=self.bs).view(-1, 1).expand(-1, self.n_max_boxes)  # b, max_num_obj
+        ind[1] = gt_labels.squeeze(-1)  # b, max_num_obj
+        # Get the scores of each grid for each gt cls
+        bbox_scores[mask_in_gts] = pd_scores[ind[0], :, ind[1]][mask_in_gts]  # b, max_num_obj, h*w
 
-        overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False,
-                            CIoU=True).squeeze(3).clamp(0)
-        align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)
+        # (b, max_num_obj, 1, 4), (b, 1, h*w, 4)
+        pd_boxes = pd_bboxes.unsqueeze(1).expand(-1, self.n_max_boxes, -1, -1)[mask_in_gts]
+        gt_boxes = gt_bboxes.unsqueeze(2).expand(-1, -1, na, -1)[mask_in_gts]
+        overlaps[mask_in_gts] = bbox_iou(gt_boxes, pd_boxes, xywh=False, CIoU=True).squeeze(-1).clamp_(0)
 
+        align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)
         return align_metric, overlaps
 
     def select_topk_candidates(self, metrics, largest=True):
@@ -80,31 +88,42 @@ class TaskAlignedAssigner(nn.Module):
             metrics: (b, max_num_obj, h*w).
             topk_mask: (b, max_num_obj, topk) or None
         """
-        num_anchors = metrics.shape[-1]  # h*w
         # (b, max_num_obj, topk)
         topk_metrics, topk_idxs = torch.topk(metrics, self.topk_candidates, dim=-1, largest=largest)
-        topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).tile([1, 1, self.topk_candidates])
+        topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).expand_as(topk_idxs)
         # (b, max_num_obj, topk)
-        topk_idxs[~topk_mask] = 0
+        topk_idxs.masked_fill_(~topk_mask, 0)
+
         # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w)
-        is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2)
-        # filter invalid bboxes
-        is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk)
-        
-        return is_in_topk.to(metrics.dtype)
+        count_tensor = torch.zeros(metrics.shape, dtype=torch.int8, device=topk_idxs.device)
+        ones = torch.ones_like(topk_idxs[:, :, :1], dtype=torch.int8, device=topk_idxs.device)
+        for k in range(self.topk_candidates):
+            # Expand topk_idxs for each value of k and add 1 at the specified positions
+            count_tensor.scatter_add_(-1, topk_idxs[:, :, k:k + 1], ones)
+        # count_tensor.scatter_add_(-1, topk_idxs, torch.ones_like(topk_idxs, dtype=torch.int8, device=topk_idxs.device))
+        # Filter invalid bboxes
+        count_tensor.masked_fill_(count_tensor > 1, 0)
+
+        return count_tensor.to(metrics.dtype)
 
     def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
-        # assigned target labels, (b, 1)
+        # Assigned target labels, (b, 1)
         batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
         target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes  # (b, h*w)
         target_labels = gt_labels.long().flatten()[target_gt_idx]  # (b, h*w)
 
-        # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w)
+        # Assigned target boxes, (b, max_num_obj, 4) -> (b, h*w, 4)
         target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx]
 
-        # assigned target scores
-        target_labels.clamp(0)
-        target_scores = F.one_hot(target_labels, self.num_classes)  # (b, h*w, 80)
+        # Assigned target scores
+        target_labels.clamp_(0)
+
+        # 10x faster than F.one_hot()
+        target_scores = torch.zeros((target_labels.shape[0], target_labels.shape[1], self.num_classes),
+                                    dtype=torch.int64,
+                                    device=target_labels.device)  # (b, h*w, 80)
+        target_scores.scatter_(2, target_labels.unsqueeze(-1), 1)
+
         fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes)  # (b, h*w, 80)
         target_scores = torch.where(fg_scores_mask > 0, target_scores, 0)
 
@@ -143,16 +162,20 @@ def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
         fg_mask (Tensor): shape(bs, num_total_anchors)
         mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
     """
-    fg_mask = mask_pos.sum(axis=-2)
-    if fg_mask.max() > 1:
-        mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1])
-        max_overlaps_idx = overlaps.argmax(axis=1)
-        is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes)
-        is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype)
-        mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos)
-        fg_mask = mask_pos.sum(axis=-2)
-    target_gt_idx = mask_pos.argmax(axis=-2)
-    return target_gt_idx, fg_mask , mask_pos
+    fg_mask = mask_pos.sum(-2)
+    if fg_mask.max() > 1:  # one anchor is assigned to multiple gt_bboxes
+        mask_multi_gts = (fg_mask.unsqueeze(1) > 1).expand(-1, n_max_boxes, -1)  # (b, n_max_boxes, h*w)
+        max_overlaps_idx = overlaps.argmax(1)  # (b, h*w)
+
+        is_max_overlaps = torch.zeros(mask_pos.shape, dtype=mask_pos.dtype, device=mask_pos.device)
+        is_max_overlaps.scatter_(1, max_overlaps_idx.unsqueeze(1), 1)
+
+        mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos).float()  # (b, n_max_boxes, h*w)
+        fg_mask = mask_pos.sum(-2)
+    # Find which gt each grid cell serves (index)
+    target_gt_idx = mask_pos.argmax(-2)  # (b, h*w)
+
+    return target_gt_idx, fg_mask, mask_pos
 
 def iou_calculator(box1, box2, eps=1e-9):
     """Calculate iou for batch

+ 10 - 0
models/detectors/yolov8/yolov8_backbone.py

@@ -60,6 +60,16 @@ class Yolov8Backbone(nn.Module):
                              depthwise  = depthwise)
         )
 
+        self.init_weights()
+        
+    def init_weights(self):
+        """Initialize the parameters."""
+        for m in self.modules():
+            if isinstance(m, torch.nn.Conv2d):
+                # In order to be consistent with the source code,
+                # reset the Conv2d initialization parameters
+                m.reset_parameters()
+
     def forward(self, x):
         c1 = self.layer_1(x)
         c2 = self.layer_2(c1)
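
The init_weights() added here walks the module tree and calls reset_parameters() on every Conv2d, i.e. it restores PyTorch's default initialization (Kaiming-uniform weights, fan-in-scaled uniform bias), overriding anything applied earlier. A small standalone sketch of the same pattern:

```python
import torch.nn as nn

def reset_conv2d(module: nn.Module) -> None:
    """Restore every Conv2d in the module tree to PyTorch's default init."""
    for m in module.modules():
        if isinstance(m, nn.Conv2d):
            m.reset_parameters()

layer = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.BatchNorm2d(16), nn.SiLU())
nn.init.zeros_(layer[0].weight)                 # pretend earlier code zeroed the weights
reset_conv2d(layer)
print(layer[0].weight.abs().sum().item() > 0)   # True: weights are re-randomized
```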

+ 1 - 7
train.sh

@@ -10,13 +10,7 @@ RESUME=$7
 # MODEL setting
 IMAGE_SIZE=640
 FIND_UNUSED_PARAMS=False
-if [[ $MODEL == *"rtcdet"* ]]; then
-    # Epoch setting
-    MAX_EPOCH=500
-    WP_EPOCH=3
-    EVAL_EPOCH=10
-    NO_AUG_EPOCH=20
-elif [[ $MODEL == *"rtdetr"* ]]; then
+if [[ $MODEL == *"rtdetr"* ]]; then
     # Epoch setting
     MAX_EPOCH=72
     WP_EPOCH=-1