
remove RTCDet

yjh0410 2 years ago
Parent
Current commit
573f38f8b1

+ 0 - 10
config/__init__.py

@@ -91,10 +91,6 @@ from .model_config.yolov5_config import yolov5_cfg
 from .model_config.yolov7_config import yolov7_cfg
 from .model_config.yolov8_config import yolov8_cfg
 from .model_config.yolox_config import yolox_cfg
-## My RTCDet series
-from .model_config.rtcdet_config import rtcdet_cfg
-## My RTRDet series
-from .model_config.rtrdet_config import rtrdet_cfg
 
 def build_model_config(args):
     print('==============================')
@@ -123,12 +119,6 @@ def build_model_config(args):
     # YOLOX
     elif args.model in ['yolox_n', 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']:
         cfg = yolox_cfg[args.model]
-    # RTCDet
-    elif args.model in ['rtcdet_p', 'rtcdet_n', 'rtcdet_t', 'rtcdet_s', 'rtcdet_m', 'rtcdet_l', 'rtcdet_x']:
-        cfg = rtcdet_cfg[args.model]
-    # RTRDet
-    elif args.model in ['rtrdet_p', 'rtrdet_n', 'rtrdet_t', 'rtrdet_s', 'rtrdet_m', 'rtrdet_l', 'rtrdet_x']:
-        cfg = rtrdet_cfg[args.model]
 
     return cfg
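
With the RTCDet/RTRDet branches removed, `build_model_config` only dispatches the remaining YOLO-family names; a string such as `rtcdet_s` would now fall through every `elif` and leave `cfg` unbound. A minimal sketch of a fail-fast guard a caller could add (a hypothetical wrapper, not code from this repository):

```python
# Hedged sketch: guard build_model_config against the names removed in this commit.
# safe_build_model_config is hypothetical; the import assumes the repo's config/ package layout.
from config import build_model_config

def safe_build_model_config(args):
    if args.model.startswith(('rtcdet', 'rtrdet')):
        raise ValueError(f"'{args.model}' was removed; pick one of the remaining YOLO-family models")
    return build_model_config(args)  # dispatches the remaining yolov*/yolox configs
```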
 

+ 0 - 390
config/model_config/rtcdet_config.py

@@ -1,390 +0,0 @@
-# RTCDet-v2 Config
-
-
-rtcdet_cfg = {
-    'rtcdet_p':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': True,
-        'width': 0.25,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        'reg_max': 16,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': True,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_branch_depth': 3,
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': True,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': True,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.4, 1.0], # 256 -> 640
-        'trans_type': 'yolov5_pico',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_dfl_weight': 1.5,
-        'loss_box_weight': 7.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
-    },
-
-    'rtcdet_n':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.25,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        'reg_max': 16,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_branch_depth': 3,
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5], # 320 -> 960
-        'trans_type': 'yolov5_nano',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_dfl_weight': 1.5,
-        'loss_box_weight': 7.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
-    },
-
-    'rtcdet_t':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.375,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        'reg_max': 16,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_branch_depth': 3,
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5], # 320 -> 960
-        'trans_type': 'yolov5_small',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_dfl_weight': 1.5,
-        'loss_box_weight': 7.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
-    },
-
-    'rtcdet_s':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.50,
-        'depth': 0.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        'reg_max': 16,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_branch_depth': 3,
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5], # 320 -> 960
-        'trans_type': 'yolov5_small',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_dfl_weight': 1.5,
-        'loss_box_weight': 7.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
-    },
-
-    'rtcdet_m':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': False,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 0.75,
-        'depth': 0.67,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        'reg_max': 16,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_branch_depth': 3,
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5], # 320 -> 960
-        'trans_type': 'yolov5_medium',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_dfl_weight': 1.5,
-        'loss_box_weight': 7.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
-    },
-
-    'rtcdet_l':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': False,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        'reg_max': 16,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_branch_depth': 3,
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5], # 320 -> 960
-        'trans_type': 'yolov5_large',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_dfl_weight': 1.5,
-        'loss_box_weight': 7.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
-    },
-
-    'rtcdet_x':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': False,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 1.25,
-        'depth': 1.34,
-        'stride': [8, 16, 32],  # P3, P4, P5
-        'max_stride': 32,
-        'reg_max': 16,
-        ## Neck: SPP
-        'neck': 'sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-        ## Neck: PaFPN
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_branch_depth': 3,
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-        ## Head
-        'head': 'decoupled_head',
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_depthwise': False,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.5], # 320 -> 960
-        'trans_type': 'yolov5_huge',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "tal",
-        'matcher_hpy': {'topk_candidates': 10,
-                        'alpha': 0.5,
-                        'beta':  6.0},
-        # ---------------- Loss config ----------------
-        'loss_cls_weight': 0.5,
-        'loss_dfl_weight': 1.5,
-        'loss_box_weight': 7.5,
-        # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
-    },
-
-}
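
For reference, the `multi_scale`, `width`, and `depth` entries above are plain multipliers: `multi_scale` scales the 640 base training size (e.g. `[0.5, 1.5]` gives 320 to 960, matching the inline comments), while `width`/`depth` scale channel counts and block depth (`round(256 * width)` for the head/FPN dims and `round(3 * depth)` for the ELAN branches elsewhere in this diff). A small sketch of that arithmetic with illustrative values only:

```python
# Hedged sketch of how the RTCDet config multipliers become concrete numbers.
# BASE_SIZE = 640 and the round(256*width) / round(3*depth) rules mirror code in this diff.
BASE_SIZE = 640

def scale_summary(cfg: dict) -> dict:
    lo, hi = cfg['multi_scale']
    return {
        'train_size_range': (int(lo * BASE_SIZE), int(hi * BASE_SIZE)),
        'head_dim': round(256 * cfg['width']),
        'convs_per_elan_branch': round(3 * cfg['depth']),
    }

# rtcdet_s-like values: multi_scale [0.5, 1.5], width 0.50, depth 0.34
print(scale_summary({'multi_scale': [0.5, 1.5], 'width': 0.50, 'depth': 0.34}))
# {'train_size_range': (320, 960), 'head_dim': 128, 'convs_per_elan_branch': 1}
```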

+ 0 - 60
config/model_config/rtrdet_config.py

@@ -1,60 +0,0 @@
-# Real-time Detection with Transformer
-
-
-rtrdet_cfg = {
-    'rtrdet_l':{
-        # ---------------- Model config ----------------
-        ## Backbone
-        'backbone': 'elannet',
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 1.0,
-        'depth': 1.0,
-        'max_stride': 32,
-        'out_stride': 16,
-        'd_model': 512,
-        ## Transformer Encoder
-        'transformer': 'RTRDetTransformer',
-        'num_encoder': 1,
-        'encoder_num_head': 8,
-        'encoder_mlp_ratio': 4.0,
-        'encoder_dropout': 0.1,
-        'neck_depthwise': False,
-        'encoder_act': 'relu',
-        ## Transformer Decoder
-        'num_decoder': 6,
-        'stop_layer_id': -1,
-        'decoder_num_head': 8,
-        'decoder_mlp_ratio': 4.0,
-        'decoder_dropout': 0.1,
-        'decoder_act': 'relu',
-        'decoder_num_queries': 100,
-        'decoder_num_pattern': 3,
-        'spatial_prior': 'learned',  # 'learned', 'grid'
-        'num_topk': 100,
-        # ---------------- Train config ----------------
-        ## Input
-        'multi_scale': [0.5, 1.0], # 320 -> 640
-        'trans_type': 'rtrdet_large',
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        'matcher': "hungarian_matcher",
-        'matcher_hpy': {"hungarian_matcher": {'cost_cls_weight':  2.0,
-                                              'cost_box_weight':  5.0,
-                                              'cost_giou_weight': 2.0,
-                                              },
-                        },
-        # ---------------- Loss config ----------------
-        ## Loss weight
-        'ema_update': False,
-        'loss_weights': {"hungarian_matcher": {'loss_cls_weight':  1.0,
-                                               'loss_box_weight':  5.0,
-                                               'loss_giou_weight': 2.0},
-                        },
-        # ---------------- Train config ----------------
-        'trainer_type': 'rtrdet',
-    },
-
-}
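
The `matcher_hpy` weights above (2.0 / 5.0 / 2.0 for classification, L1 box, and GIoU costs) parameterize a DETR-style Hungarian matcher. A compact sketch of that matching step under those weights; the removed RTRDet implementation may differ in detail (e.g. box parameterization), so treat this as illustrative only:

```python
# Hedged sketch of DETR-style Hungarian matching with the cost weights from the config above.
import torch
from torchvision.ops import generalized_box_iou
from scipy.optimize import linear_sum_assignment

def hungarian_match(pred_logits, pred_boxes, gt_labels, gt_boxes,
                    w_cls=2.0, w_box=5.0, w_giou=2.0):
    """pred_logits: [Q, C]; pred_boxes/gt_boxes: xyxy [Q, 4] / [G, 4]; gt_labels: [G]."""
    prob = pred_logits.softmax(-1)                      # class probabilities per query
    cost_cls = -prob[:, gt_labels]                      # [Q, G] higher prob -> lower cost
    cost_box = torch.cdist(pred_boxes, gt_boxes, p=1)   # [Q, G] L1 box distance
    cost_giou = -generalized_box_iou(pred_boxes, gt_boxes)
    cost = w_cls * cost_cls + w_box * cost_box + w_giou * cost_giou
    q_idx, g_idx = linear_sum_assignment(cost.cpu().numpy())
    return q_idx, g_idx                                 # matched (query, gt) index pairs
```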

+ 0 - 12
models/detectors/__init__.py

@@ -11,10 +11,6 @@ from .yolov5.build import build_yolov5
 from .yolov7.build import build_yolov7
 from .yolov8.build import build_yolov8
 from .yolox.build import build_yolox
-# My RTCDet
-from .rtcdet.build import build_rtcdet
-# My RTRDet
-from .rtrdet.build import build_rtrdet
 
 
 # build object detector
@@ -56,14 +52,6 @@ def build_model(args,
     elif args.model in ['yolox_n', 'yolox_s', 'yolox_m', 'yolox_l', 'yolox_x']:
         model, criterion = build_yolox(
             args, model_cfg, device, num_classes, trainable, deploy)
-    # RTCDet
-    elif args.model in ['rtcdet_p', 'rtcdet_n', 'rtcdet_t', 'rtcdet_s', 'rtcdet_m', 'rtcdet_l', 'rtcdet_x']:
-        model, criterion = build_rtcdet(
-            args, model_cfg, device, num_classes, trainable, deploy)
-    # RTRDet
-    elif args.model in ['rtrdet_p', 'rtrdet_n', 'rtrdet_t', 'rtrdet_s', 'rtrdet_m', 'rtrdet_l', 'rtrdet_x']:
-        model, criterion = build_rtrdet(
-            args, model_cfg, device, num_classes, trainable, deploy)
 
     if trainable:
         # Load pretrained weight

+ 0 - 64
models/detectors/rtcdet/README.md

@@ -1,64 +0,0 @@
-# RTCDet: My Empirical Study of Real-Time Convolutional Object Detectors.
-
-|   Model  | Scale | Batch | AP<sup>test<br>0.5:0.95 | AP<sup>test<br>0.5 | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|----------|-------|-------|-------------------------|--------------------|------------------------|-------------------|-------------------|--------------------|--------|
-| RTCDet-N |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDet-T |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDet-S |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDet-M |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDet-L |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-| RTCDet-X |  640  | 8xb16 |                         |                    |                        |                   |                   |                    |  |
-
-|   Model  | Scale | Batch | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
-|----------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
-| RTCDet-P |  320  | 8xb16 |                        |                   |                   |                    | - |
-| RTCDet-P |  416  | 8xb16 |                        |                   |                   |                    | - |
-| RTCDet-P |  512  | 8xb16 |                        |                   |                   |                    | - |
-| RTCDet-P |  640  | 8xb16 |                        |                   |                   |                    | - |
-
-- For training, we train my RTCDet series for 300 epochs on COCO.
-- For data augmentation, we use large-scale jitter (LSJ), Mosaic augmentation, and Mixup augmentation, following the setting of [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX), but we remove the rotation transformation used in YOLOX's strong augmentation.
-- For the optimizer, we use AdamW with weight decay 0.05 and a base per-image lr of 0.001 / 64 (see the short sketch after this list).
-- For the learning rate scheduler, we use a linear decay scheduler.
-- Due to my limited computing resources, I cannot train `RTCDet-X` with the setting of `batch size=128`.
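
A short sketch of the learning-rate rule from the optimizer note above, assuming the effective AdamW lr scales linearly with the total batch size:

```python
# Hedged sketch of the "base per-image lr 0.001 / 64" rule (linear scaling assumed).
def effective_lr(total_batch_size, base_lr_per_64_images=0.001):
    return base_lr_per_64_images * total_batch_size / 64

print(effective_lr(16))   # single-GPU example below (-bs 16):  0.00025
print(effective_lr(128))  # 8-GPU example below (-bs 128):      0.002
```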
-
-## Train RTCDet
-### Single GPU
-Taking training RTCDet-S on COCO as the example,
-```Shell
-python train.py --cuda -d coco --root path/to/coco -m rtcdet_s -bs 16 -size 640 --wp_epoch 3 --max_epoch 300 --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --multi_scale 
-```
-
-### Multi GPU
-Taking training RTCDet-S on COCO as the example,
-```Shell
-python -m torch.distributed.run --nproc_per_node=8 train.py --cuda -dist -d coco --root /data/datasets/ -m rtcdet_s -bs 128 -size 640 --wp_epoch 3 --max_epoch 300  --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --sybn --multi_scale --save_folder weights/ 
-```
-
-## Test RTCDet
-Taking testing RTCDet-S on COCO-val as the example,
-```Shell
-python test.py --cuda -d coco --root path/to/coco -m rtcdet_s --weight path/to/rtcdet_s.pth -size 640 -vt 0.4 --show 
-```
-
-## Evaluate RTCDet
-Taking evaluating RTCDet-S on COCO-val as the example,
-```Shell
-python eval.py --cuda -d coco-val --root path/to/coco -m rtcdet_s --weight path/to/rtcdet_s.pth 
-```
-
-## Demo
-### Detect with Image
-```Shell
-python demo.py --mode image --path_to_img path/to/image_dirs/ --cuda -m rtcdet_s --weight path/to/weight -size 640 -vt 0.4 --show
-```
-
-### Detect with Video
-```Shell
-python demo.py --mode video --path_to_vid path/to/video --cuda -m rtcdet_s --weight path/to/weight -size 640 -vt 0.4 --show --gif
-```
-
-### Detect with Camera
-```Shell
-python demo.py --mode camera --cuda -m rtcdet_s --weight path/to/weight -size 640 -vt 0.4 --show --gif
-```

+ 0 - 41
models/detectors/rtcdet/build.py

@@ -1,41 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-import torch
-import torch.nn as nn
-
-from .loss import build_criterion
-from .rtcdet import RTCDet
-
-
-# build object detector
-def build_rtcdet(args, cfg, device, num_classes=80, trainable=False, deploy=False):
-    print('==============================')
-    print('Build {} ...'.format(args.model.upper()))
-        
-    # -------------- Build RTCDet --------------
-    model = RTCDet(
-        cfg=cfg,
-        device=device, 
-        num_classes=num_classes,
-        trainable=trainable,
-        conf_thresh=args.conf_thresh,
-        nms_thresh=args.nms_thresh,
-        topk=args.topk,
-        deploy=deploy,
-        nms_class_agnostic=args.nms_class_agnostic
-        )
-
-    # -------------- Initialize RTCDet --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if trainable:
-        # build criterion for training
-        criterion = build_criterion(cfg, device, num_classes)
-        
-    return model, criterion

+ 0 - 303
models/detectors/rtcdet/loss.py

@@ -1,303 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from utils.box_ops import bbox2dist, bbox_iou
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-from .matcher import TaskAlignedAssigner
-
-
-class Criterion(object):
-    def __init__(self, cfg, device, num_classes=80):
-        # --------------- Basic parameters ---------------
-        self.cfg = cfg
-        self.device = device
-        self.num_classes = num_classes
-        self.reg_max = cfg['reg_max']
-        self.use_dfl = cfg['reg_max'] > 1
-        # --------------- Loss config ---------------
-        self.loss_cls_weight = cfg['loss_cls_weight']
-        self.loss_box_weight = cfg['loss_box_weight']
-        self.loss_dfl_weight = cfg['loss_dfl_weight']
-        # --------------- Matcher config ---------------
-        self.matcher_hpy = cfg['matcher_hpy']
-        self.matcher = TaskAlignedAssigner(num_classes     = num_classes,
-                                           topk_candidates = self.matcher_hpy['topk_candidates'],
-                                           alpha           = self.matcher_hpy['alpha'],
-                                           beta            = self.matcher_hpy['beta']
-                                           )
-
-    def loss_classes(self, pred_cls, gt_score):
-        # compute bce loss
-        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_score, reduction='none')
-
-        return loss_cls
-    
-    def loss_bboxes(self, pred_box, gt_box, bbox_weight):
-        # regression loss
-        ious = bbox_iou(pred_box, gt_box, xywh=False, CIoU=True)
-        loss_box = (1.0 - ious.squeeze(-1)) * bbox_weight
-
-        return loss_box
-    
-    def loss_dfl(self, pred_reg, gt_box, anchor, stride, bbox_weight=None):
-        # rescale coords by stride
-        gt_box_s = gt_box / stride
-        anchor_s = anchor / stride
-
-        # compute deltas
-        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.cfg['reg_max'] - 1)
-
-        gt_left = gt_ltrb_s.to(torch.long)
-        gt_right = gt_left + 1
-
-        weight_left = gt_right.to(torch.float) - gt_ltrb_s
-        weight_right = 1 - weight_left
-
-        # loss left
-        loss_left = F.cross_entropy(
-            pred_reg.view(-1, self.cfg['reg_max']),
-            gt_left.view(-1),
-            reduction='none').view(gt_left.shape) * weight_left
-        # loss right
-        loss_right = F.cross_entropy(
-            pred_reg.view(-1, self.cfg['reg_max']),
-            gt_right.view(-1),
-            reduction='none').view(gt_left.shape) * weight_right
-
-        loss_dfl = (loss_left + loss_right).mean(-1)
-        
-        if bbox_weight is not None:
-            loss_dfl *= bbox_weight
-
-        return loss_dfl
-
-    def __call__(self, outputs, targets, epoch=0):        
-        """
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_reg']: List(Tensor) [B, M, 4*(reg_max+1)]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['anchors']: List(Tensor) [M, 2]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            outputs['stride_tensor']: List(Tensor) [M, 1]
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs = outputs['pred_cls'][0].shape[0]
-        device = outputs['pred_cls'][0].device
-        strides = outputs['stride_tensor']
-        anchors = outputs['anchors']
-        anchors = torch.cat(anchors, dim=0)
-        num_anchors = anchors.shape[0]
-
-        # preds: [B, M, C]
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-        
-        # --------------- label assignment ---------------
-        gt_score_targets = []
-        gt_bbox_targets = []
-        fg_masks = []
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)     # [Mp,]
-            tgt_boxs = targets[batch_idx]["boxes"].to(device)        # [Mp, 4]
-
-            # check target
-            if len(tgt_labels) == 0 or tgt_boxs.max().item() == 0.:
-                # There is no valid gt
-                fg_mask = cls_preds.new_zeros(1, num_anchors).bool()               #[1, M,]
-                gt_score = cls_preds.new_zeros((1, num_anchors, self.num_classes)) #[1, M, C]
-                gt_box = cls_preds.new_zeros((1, num_anchors, 4))                  #[1, M, 4]
-            else:
-                tgt_labels = tgt_labels[None, :, None]      # [1, Mp, 1]
-                tgt_boxs = tgt_boxs[None]                   # [1, Mp, 4]
-                (
-                    _,
-                    gt_box,     # [1, M, 4]
-                    gt_score,   # [1, M, C]
-                    fg_mask,    # [1, M,]
-                    _
-                ) = self.matcher(
-                    pd_scores = cls_preds[batch_idx:batch_idx+1].detach().sigmoid(), 
-                    pd_bboxes = box_preds[batch_idx:batch_idx+1].detach(),
-                    anc_points = anchors,
-                    gt_labels = tgt_labels,
-                    gt_bboxes = tgt_boxs
-                    )
-            gt_score_targets.append(gt_score)
-            gt_bbox_targets.append(gt_box)
-            fg_masks.append(fg_mask)
-
-        # List[B, 1, M, C] -> Tensor[B, M, C] -> Tensor[BM, C]
-        fg_masks = torch.cat(fg_masks, 0).view(-1)                                    # [BM,]
-        gt_score_targets = torch.cat(gt_score_targets, 0).view(-1, self.num_classes)  # [BM, C]
-        gt_bbox_targets = torch.cat(gt_bbox_targets, 0).view(-1, 4)                   # [BM, 4]
-        num_fgs = gt_score_targets.sum()
-        
-        # Average loss normalizer across all the GPUs
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # ------------------ Classification loss ------------------
-        cls_preds = cls_preds.view(-1, self.num_classes)
-        loss_cls = self.loss_classes(cls_preds, gt_score_targets)
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ Regression loss ------------------
-        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
-        box_targets_pos = gt_bbox_targets.view(-1, 4)[fg_masks]
-        bbox_weight = gt_score_targets[fg_masks].sum(-1)
-        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos, bbox_weight)
-        loss_box = loss_box.sum() / num_fgs
-
-        # ------------------ Distribution focal loss  ------------------
-        ## process anchors
-        anchors = torch.cat(outputs['anchors'], dim=0)
-        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
-        ## process stride tensors
-        strides = torch.cat(outputs['stride_tensor'], dim=0)
-        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
-        ## fg preds
-        reg_preds_pos = reg_preds.view(-1, 4*self.cfg['reg_max'])[fg_masks]
-        anchors_pos = anchors[fg_masks]
-        strides_pos = strides[fg_masks]
-        ## compute dfl
-        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets_pos, anchors_pos, strides_pos, bbox_weight)
-        loss_dfl = loss_dfl.sum() / num_fgs
-
-        # total loss
-        if not self.use_dfl:
-            losses = loss_cls * self.loss_cls_weight + loss_box * self.loss_box_weight
-            loss_dict = dict(
-                    loss_cls = loss_cls,
-                    loss_box = loss_box,
-                    losses = losses
-            )
-        else:
-            losses = loss_cls * self.loss_cls_weight + loss_box * self.loss_box_weight + loss_dfl * self.loss_dfl_weight
-            loss_dict = dict(
-                    loss_cls = loss_cls,
-                    loss_box = loss_box,
-                    loss_dfl = loss_dfl,
-                    losses = losses
-            )
-
-        return loss_dict
-    
-
-class ClassificationLoss(nn.Module):
-    def __init__(self, cfg, reduction='none'):
-        super(ClassificationLoss, self).__init__()
-        self.cfg = cfg
-        self.reduction = reduction
-        # For VFL
-        self.alpha = 0.75
-        self.gamma = 2.0
-
-
-    def binary_cross_entropy(self, pred_logits, gt_score):
-        loss = F.binary_cross_entropy_with_logits(
-            pred_logits.float(), gt_score.float(), reduction='none')
-
-        if self.reduction == 'sum':
-            loss = loss.sum()
-        elif self.reduction == 'mean':
-            loss = loss.mean()
-
-        return loss
-
-
-    def forward(self, pred_logits, gt_score):
-        if self.cfg['cls_loss'] == 'bce':
-            return self.binary_cross_entropy(pred_logits, gt_score)
-
-
-class RegressionLoss(nn.Module):
-    def __init__(self, num_classes, reg_max, use_dfl):
-        super(RegressionLoss, self).__init__()
-        self.num_classes = num_classes
-        self.reg_max = reg_max
-        self.use_dfl = use_dfl
-
-
-    def df_loss(self, pred_regs, target):
-        gt_left = target.to(torch.long)
-        gt_right = gt_left + 1
-        weight_left = gt_right.to(torch.float) - target
-        weight_right = 1 - weight_left
-        # loss left
-        loss_left = F.cross_entropy(
-            pred_regs.view(-1, self.reg_max + 1),
-            gt_left.view(-1),
-            reduction='none').view(gt_left.shape) * weight_left
-        # loss right
-        loss_right = F.cross_entropy(
-            pred_regs.view(-1, self.reg_max + 1),
-            gt_right.view(-1),
-            reduction='none').view(gt_left.shape) * weight_right
-
-        loss = (loss_left + loss_right).mean(-1, keepdim=True)
-        
-        return loss
-
-
-    def forward(self, pred_regs, pred_boxs, anchors, gt_boxs, bbox_weight, fg_masks, strides):
-        """
-        Input:
-            pred_regs: (Tensor) [BM, 4*(reg_max + 1)]
-            pred_boxs: (Tensor) [BM, 4]
-            anchors: (Tensor) [BM, 2]
-            gt_boxs: (Tensor) [BM, 4]
-            bbox_weight: (Tensor) [BM, 1]
-            fg_masks: (Tensor) [BM,]
-            strides: (Tensor) [BM, 1]
-        """
-        # select positive samples mask
-        num_pos = fg_masks.sum()
-
-        if num_pos > 0:
-            pred_boxs_pos = pred_boxs[fg_masks]
-            gt_boxs_pos = gt_boxs[fg_masks]
-
-            # iou loss
-            ious = bbox_iou(pred_boxs_pos,
-                            gt_boxs_pos,
-                            xywh=False,
-                            CIoU=True)
-            loss_iou = (1.0 - ious) * bbox_weight
-               
-            # dfl loss
-            if self.use_dfl:
-                pred_regs_pos = pred_regs[fg_masks]
-                gt_boxs_s = gt_boxs / strides
-                anchors_s = anchors / strides
-                gt_ltrb_s = bbox2dist(anchors_s, gt_boxs_s, self.reg_max)
-                gt_ltrb_s_pos = gt_ltrb_s[fg_masks]
-                loss_dfl = self.df_loss(pred_regs_pos, gt_ltrb_s_pos)
-                loss_dfl *= bbox_weight
-            else:
-                loss_dfl = pred_regs.sum() * 0.
-
-        else:
-            loss_iou = pred_regs.sum() * 0.
-            loss_dfl = pred_regs.sum() * 0.
-
-        return loss_iou, loss_dfl
-
-
-def build_criterion(cfg, device, num_classes):
-    criterion = Criterion(
-        cfg=cfg,
-        device=device,
-        num_classes=num_classes
-        )
-
-    return criterion
-
-
-if __name__ == "__main__":
-    pass
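
For context, `loss_dfl` / `df_loss` above implement the Distribution Focal Loss: each continuous left/top/right/bottom distance target (in stride units, within `[0, reg_max - 1]`) is split between its two neighbouring integer bins with linear weights, and cross-entropy is taken against each bin. A self-contained sketch with illustrative numbers:

```python
# Hedged sketch of the DFL target decomposition used by df_loss above (toy values).
import torch
import torch.nn.functional as F

reg_max = 16
t = torch.tensor([2.7])                  # continuous ltrb distance target, in stride units
pred_logits = torch.randn(1, reg_max)    # one side's predicted distribution over reg_max bins

gt_left  = t.long()                      # bin 2
gt_right = gt_left + 1                   # bin 3
w_left   = gt_right.float() - t          # 0.3
w_right  = 1.0 - w_left                  # 0.7

loss = (F.cross_entropy(pred_logits, gt_left,  reduction='none') * w_left +
        F.cross_entropy(pred_logits, gt_right, reduction='none') * w_right)
# 30% of the target mass sits on bin 2 and 70% on bin 3, so the expected bin index is 2.7.
print(loss.item())
```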

+ 0 - 176
models/detectors/rtcdet/matcher.py

@@ -1,176 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from utils.box_ops import bbox_iou
-
-
-# -------------------------- Task Aligned Assigner --------------------------
-class TaskAlignedAssigner(nn.Module):
-    def __init__(self,
-                 num_classes     = 80,
-                 topk_candidates = 10,
-                 alpha           = 0.5,
-                 beta            = 6.0, 
-                 eps             = 1e-9):
-        super(TaskAlignedAssigner, self).__init__()
-        self.topk_candidates = topk_candidates
-        self.num_classes = num_classes
-        self.bg_idx = num_classes
-        self.alpha = alpha
-        self.beta = beta
-        self.eps = eps
-
-    @torch.no_grad()
-    def forward(self,
-                pd_scores,
-                pd_bboxes,
-                anc_points,
-                gt_labels,
-                gt_bboxes):
-        self.bs = pd_scores.size(0)
-        self.n_max_boxes = gt_bboxes.size(1)
-
-        mask_pos, align_metric, overlaps = self.get_pos_mask(
-            pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points)
-
-        target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(
-            mask_pos, overlaps, self.n_max_boxes)
-
-        # assigned target
-        target_labels, target_bboxes, target_scores = self.get_targets(
-            gt_labels, gt_bboxes, target_gt_idx, fg_mask)
-
-        # normalize
-        align_metric *= mask_pos
-        pos_align_metrics = align_metric.amax(axis=-1, keepdim=True)  # b, max_num_obj
-        pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True)  # b, max_num_obj
-        norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1)
-        target_scores = target_scores * norm_align_metric
-
-        return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx
-
-    def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points):
-        # get anchor_align metric, (b, max_num_obj, h*w)
-        align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes)
-        # get in_gts mask, (b, max_num_obj, h*w)
-        mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
-        # get topk_metric mask, (b, max_num_obj, h*w)
-        mask_topk = self.select_topk_candidates(align_metric * mask_in_gts)
-        # merge all mask to a final mask, (b, max_num_obj, h*w)
-        mask_pos = mask_topk * mask_in_gts
-
-        return mask_pos, align_metric, overlaps
-
-    def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes):
-        ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long)  # 2, b, max_num_obj
-        ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes)  # b, max_num_obj
-        ind[1] = gt_labels.long().squeeze(-1)  # b, max_num_obj
-        # get the scores of each grid for each gt cls
-        bbox_scores = pd_scores[ind[0], :, ind[1]]  # b, max_num_obj, h*w
-
-        overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False,
-                            CIoU=True).squeeze(3).clamp(0)
-        align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)
-
-        return align_metric, overlaps
-
-    def select_topk_candidates(self, metrics, largest=True):
-        """
-        Args:
-            metrics: (b, max_num_obj, h*w).
-            topk_mask: (b, max_num_obj, topk) or None
-        """
-        num_anchors = metrics.shape[-1]  # h*w
-        # (b, max_num_obj, topk)
-        topk_metrics, topk_idxs = torch.topk(metrics, self.topk_candidates, dim=-1, largest=largest)
-        topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).tile([1, 1, self.topk_candidates])
-        # (b, max_num_obj, topk)
-        topk_idxs[~topk_mask] = 0
-        # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w)
-        is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2)
-        # filter invalid bboxes
-        is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk)
-        
-        return is_in_topk.to(metrics.dtype)
-
-    def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
-        # assigned target labels, (b, 1)
-        batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
-        target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes  # (b, h*w)
-        target_labels = gt_labels.long().flatten()[target_gt_idx]  # (b, h*w)
-
-        # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w)
-        target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx]
-
-        # assigned target scores
-        target_labels.clamp_(0)
-        target_scores = F.one_hot(target_labels, self.num_classes)  # (b, h*w, 80)
-        fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes)  # (b, h*w, 80)
-        target_scores = torch.where(fg_scores_mask > 0, target_scores, 0)
-
-        return target_labels, target_bboxes, target_scores
-    
-
-# -------------------------- Basic Functions --------------------------
-def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
-    """select the positive anchors's center in gt
-    Args:
-        xy_centers (Tensor): shape(num_total_anchors, 2)
-        gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    n_anchors = xy_centers.size(0)
-    bs, n_max_boxes, _ = gt_bboxes.size()
-    _gt_bboxes = gt_bboxes.reshape([-1, 4])
-    xy_centers = xy_centers.unsqueeze(0).repeat(bs * n_max_boxes, 1, 1)
-    gt_bboxes_lt = _gt_bboxes[:, 0:2].unsqueeze(1).repeat(1, n_anchors, 1)
-    gt_bboxes_rb = _gt_bboxes[:, 2:4].unsqueeze(1).repeat(1, n_anchors, 1)
-    b_lt = xy_centers - gt_bboxes_lt
-    b_rb = gt_bboxes_rb - xy_centers
-    bbox_deltas = torch.cat([b_lt, b_rb], dim=-1)
-    bbox_deltas = bbox_deltas.reshape([bs, n_max_boxes, n_anchors, -1])
-    return (bbox_deltas.min(axis=-1)[0] > eps).to(gt_bboxes.dtype)
-
-def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
-    """if an anchor box is assigned to multiple gts,
-        the one with the highest iou will be selected.
-    Args:
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-        overlaps (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    Return:
-        target_gt_idx (Tensor): shape(bs, num_total_anchors)
-        fg_mask (Tensor): shape(bs, num_total_anchors)
-        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    fg_mask = mask_pos.sum(axis=-2)
-    if fg_mask.max() > 1:
-        mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1])
-        max_overlaps_idx = overlaps.argmax(axis=1)
-        is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes)
-        is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype)
-        mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos)
-        fg_mask = mask_pos.sum(axis=-2)
-    target_gt_idx = mask_pos.argmax(axis=-2)
-    return target_gt_idx, fg_mask, mask_pos
-
-def iou_calculator(box1, box2, eps=1e-9):
-    """Calculate iou for batch
-    Args:
-        box1 (Tensor): shape(bs, n_max_boxes, 1, 4)
-        box2 (Tensor): shape(bs, 1, num_total_anchors, 4)
-    Return:
-        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
-    """
-    box1 = box1.unsqueeze(2)  # [N, M1, 4] -> [N, M1, 1, 4]
-    box2 = box2.unsqueeze(1)  # [N, M2, 4] -> [N, 1, M2, 4]
-    px1y1, px2y2 = box1[:, :, :, 0:2], box1[:, :, :, 2:4]
-    gx1y1, gx2y2 = box2[:, :, :, 0:2], box2[:, :, :, 2:4]
-    x1y1 = torch.maximum(px1y1, gx1y1)
-    x2y2 = torch.minimum(px2y2, gx2y2)
-    overlap = (x2y2 - x1y1).clip(0).prod(-1)
-    area1 = (px2y2 - px1y1).clip(0).prod(-1)
-    area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
-    union = area1 + area2 - overlap + eps
-
-    return overlap / union
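
The assigner above ranks every (gt, anchor) pair with the task-aligned metric `score**alpha * IoU**beta` (alpha = 0.5, beta = 6.0 in the RTCDet configs) and keeps the top-k anchors per ground truth. A toy sketch of that metric (illustrative numbers only):

```python
# Hedged sketch of the task-aligned metric used by TaskAlignedAssigner above (toy values).
import torch

alpha, beta, topk_candidates = 0.5, 6.0, 10
cls_score = torch.tensor([0.90, 0.60, 0.10])  # predicted prob of the gt class at 3 anchors
iou       = torch.tensor([0.70, 0.85, 0.95])  # IoU of the predicted boxes with the gt box

align_metric = cls_score.pow(alpha) * iou.pow(beta)
print(align_metric)  # ~[0.112, 0.292, 0.232]: with beta >> alpha, localization dominates

k = min(topk_candidates, align_metric.numel())
_, topk_idx = align_metric.topk(k)            # these anchors become positives for this gt
print(topk_idx)
```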

+ 0 - 182
models/detectors/rtcdet/rtcdet.py

@@ -1,182 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-from .rtcdet_backbone import build_backbone
-from .rtcdet_neck import build_neck
-from .rtcdet_pafpn import build_fpn
-from .rtcdet_head import build_det_head
-from .rtcdet_pred import build_pred_layer
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# My RTCDet
-class RTCDet(nn.Module):
-    def __init__(self, 
-                 cfg,
-                 device, 
-                 num_classes        :int   = 20, 
-                 conf_thresh        :float = 0.05,
-                 nms_thresh         :float = 0.6,
-                 topk               :int   = 1000,
-                 trainable          :bool  = False, 
-                 deploy             :bool  = False,
-                 nms_class_agnostic :bool = False):
-        super(RTCDet, self).__init__()
-        # ---------------------- Basic Parameters ----------------------
-        self.cfg = cfg
-        self.device = device
-        self.strides = cfg['stride']
-        self.reg_max = cfg['reg_max']
-        self.num_levels = len(self.strides)
-        self.num_classes = num_classes
-        self.trainable = trainable
-        self.conf_thresh = conf_thresh
-        self.nms_thresh = nms_thresh
-        self.topk = topk
-        self.deploy = deploy
-        self.nms_class_agnostic = nms_class_agnostic
-        self.head_dim = round(256*cfg['width'])
-        
-        # ---------------------- Network Parameters ----------------------
-        ## ----------- Backbone -----------
-        self.backbone, feats_dim = build_backbone(cfg, trainable&cfg['pretrained'])
-
-        ## ----------- Neck: SPP -----------
-        self.neck = build_neck(cfg, feats_dim[-1], feats_dim[-1]//2)
-        feats_dim[-1] = self.neck.out_dim
-        
-        ## ----------- Neck: FPN -----------
-        self.fpn = build_fpn(cfg, feats_dim, round(256*cfg['width']))
-        self.fpn_dims = self.fpn.out_dim
-
-        ## ----------- Heads -----------
-        self.det_heads = build_det_head(cfg, self.fpn_dims, self.num_levels, num_classes, self.reg_max)
-
-        ## ----------- Preds -----------
-        self.pred_layers = build_pred_layer(cls_dim     = self.det_heads.cls_head_dim,
-                                            reg_dim     = self.det_heads.reg_head_dim,
-                                            strides     = self.strides,
-                                            num_classes = num_classes,
-                                            num_coords  = 4,
-                                            num_levels  = self.num_levels,
-                                            reg_max     = self.reg_max)
-
-    ## post-process
-    def post_process(self, cls_preds, box_preds):
-        """
-        Input:
-            cls_preds: List(Tensor) [[H x W, C], ...]
-            box_preds: List(Tensor) [[H x W, 4], ...]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            
-            # (H x W x C,)
-            scores_i = cls_pred_i.sigmoid().flatten()
-
-            # Keep top k top scoring indices only.
-            num_topk = min(self.topk, box_pred_i.size(0))
-
-            # torch.sort is actually faster than .topk (at least on GPUs)
-            predicted_prob, topk_idxs = scores_i.sort(descending=True)
-            topk_scores = predicted_prob[:num_topk]
-            topk_idxs = topk_idxs[:num_topk]
-
-            # filter out the proposals with low confidence score
-            keep_idxs = topk_scores > self.conf_thresh
-            scores = topk_scores[keep_idxs]
-            topk_idxs = topk_idxs[keep_idxs]
-
-            anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-            labels = topk_idxs % self.num_classes
-
-            bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores)
-        labels = torch.cat(all_labels)
-        bboxes = torch.cat(all_bboxes)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes, self.nms_class_agnostic)
-
-        return bboxes, scores, labels
-
-
-    # ---------------------- Main Process for Inference ----------------------
-    @torch.no_grad()
-    def inference_single_image(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Heads ----------------
-        cls_feats, reg_feats = self.det_heads(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.pred_layers(cls_feats, reg_feats)
-
-        all_cls_preds = outputs['pred_cls']
-        all_box_preds = outputs['pred_box']
-
-        if self.deploy:
-            cls_preds = torch.cat(all_cls_preds, dim=1)[0]
-            box_preds = torch.cat(all_box_preds, dim=1)[0]
-            scores = cls_preds.sigmoid()
-            bboxes = box_preds
-            # [n_anchors_all, 4 + C]
-            outputs = torch.cat([bboxes, scores], dim=-1)
-
-            return outputs
-        else:
-            # post process
-            bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
-        
-            return bboxes, scores, labels
-
-
-    # ---------------------- Main Process for Training ----------------------
-    def forward(self, x):
-        if not self.trainable:
-            return self.inference_single_image(x)
-        else:
-            # ---------------- Backbone ----------------
-            pyramid_feats = self.backbone(x)
-
-            # ---------------- Neck: SPP ----------------
-            pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-            # ---------------- Neck: PaFPN ----------------
-            pyramid_feats = self.fpn(pyramid_feats)
-
-            # ---------------- Heads ----------------
-            cls_feats, reg_feats = self.det_heads(pyramid_feats)
-
-            # ---------------- Preds ----------------
-            outputs = self.pred_layers(cls_feats, reg_feats)
-            
-            return outputs 
-        
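
In deploy mode the model above returns a single `[n_anchors_all, 4 + C]` tensor of decoded boxes concatenated with sigmoid class scores. A hedged sketch of how such an exported output could be decoded downstream (threshold and shapes are illustrative; the in-repo inference path uses `multiclass_nms` instead):

```python
# Hedged sketch: decoding the deploy-mode output of RTCDet above (toy tensor).
import torch
from torchvision.ops import batched_nms

num_classes = 80
outputs = torch.rand(8400, 4 + num_classes)   # [n_anchors_all, 4 + C], e.g. from an ONNX runtime

bboxes = outputs[:, :4]                       # decoded xyxy boxes
scores, labels = outputs[:, 4:].max(dim=-1)   # best class score / index per anchor

keep = scores > 0.4                           # confidence threshold (illustrative)
bboxes, scores, labels = bboxes[keep], scores[keep], labels[keep]
keep = batched_nms(bboxes, scores, labels, iou_threshold=0.6)  # class-aware NMS
bboxes, scores, labels = bboxes[keep], scores[keep], labels[keep]
```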

+ 0 - 156
models/detectors/rtcdet/rtcdet_backbone.py

@@ -1,156 +0,0 @@
-import torch
-import torch.nn as nn
-try:
-    from .rtcdet_basic import Conv, ELANBlock, DSBlock
-except:
-    from rtcdet_basic import Conv, ELANBlock, DSBlock
-
-
-model_urls = {
-    'elannet_pico':   "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_pico.pth",
-    'elannet_nano':   "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_nano.pth",
-    'elannet_tiny':   "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_tiny.pth",
-    'elannet_small':  "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_small.pth",
-    'elannet_medium': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_medium.pth",
-    'elannet_large':  "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_large.pth",
-    'elannet_huge':   "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_huge.pth",
-}
-
-
-# ---------------------------- Backbones ----------------------------
-# ELANNet-P5
-class ELANNet(nn.Module):
-    def __init__(self, width=1.0, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELANNet, self).__init__()
-        # ------------------ Basic parameters ------------------
-        self.width = width
-        self.depth = depth
-        self.expand_ratios = [0.5, 0.5, 0.5, 0.25]
-        self.feat_dims = [round(64*width), round(128*width), round(256*width), round(512*width), round(1024*width), round(1024*width)]
-        
-        # ------------------ Network parameters ------------------
-        ## P1/2
-        self.layer_1 = nn.Sequential(
-            Conv(3, self.feat_dims[0], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            Conv(self.feat_dims[0], self.feat_dims[0], k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        ## P2/4
-        self.layer_2 = nn.Sequential(   
-            Conv(self.feat_dims[0], self.feat_dims[1], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),             
-            ELANBlock(self.feat_dims[1], self.feat_dims[2], self.expand_ratios[0], self.depth, act_type, norm_type, depthwise)
-        )
-        ## P3/8
-        self.layer_3 = nn.Sequential(
-            DSBlock(self.feat_dims[2], self.feat_dims[2], act_type, norm_type, depthwise),             
-            ELANBlock(self.feat_dims[2], self.feat_dims[3], self.expand_ratios[1], self.depth, act_type, norm_type, depthwise)
-        )
-        ## P4/16
-        self.layer_4 = nn.Sequential(
-            DSBlock(self.feat_dims[3], self.feat_dims[3], act_type, norm_type, depthwise),             
-            ELANBlock(self.feat_dims[3], self.feat_dims[4], self.expand_ratios[2], self.depth, act_type, norm_type, depthwise)
-        )
-        ## P5/32
-        self.layer_5 = nn.Sequential(
-            DSBlock(self.feat_dims[4], self.feat_dims[4], act_type, norm_type, depthwise),             
-            ELANBlock(self.feat_dims[4], self.feat_dims[5], self.expand_ratios[3], self.depth, act_type, norm_type, depthwise)
-        )
-
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-# ---------------------------- Functions ----------------------------
-## load pretrained weight
-def load_weight(model, model_name):
-    # load weight
-    print('Loading pretrained weight ...')
-    url = model_urls[model_name]
-    if url is not None:
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print(k)
-
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No pretrained for {}'.format(model_name))
-
-    return model
-
-## build ELAN-Net
-def build_backbone(cfg, pretrained=False): 
-    # model
-    backbone = ELANNet(
-        width=cfg['width'],
-        depth=cfg['depth'],
-        act_type=cfg['bk_act'],
-        norm_type=cfg['bk_norm'],
-        depthwise=cfg['bk_depthwise']
-        )
-    # check whether to load imagenet pretrained weight
-    if pretrained:
-        if cfg['width'] == 0.25 and cfg['depth'] == 0.34 and cfg['bk_depthwise']:
-            backbone = load_weight(backbone, model_name='elannet_pico')
-        elif cfg['width'] == 0.25 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_nano')
-        elif cfg['width'] == 0.375 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_tiny')
-        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_small')
-        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
-            backbone = load_weight(backbone, model_name='elannet_medium')
-        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
-            backbone = load_weight(backbone, model_name='elannet_large')
-        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
-            backbone = load_weight(backbone, model_name='elannet_huge')
-    feat_dims = backbone.feat_dims[-3:]
-
-    return backbone, feat_dims
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 1.0,
-        'depth': 1.0,
-    }
-    model, feats = build_backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

+ 0 - 237
models/detectors/rtcdet/rtcdet_basic.py

@@ -1,237 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-
-
-# ---------------------------- 2D CNN ----------------------------
-class SiLU(nn.Module):
-    """export-friendly version of nn.SiLU()"""
-
-    @staticmethod
-    def forward(x):
-        return x * torch.sigmoid(x)
-
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-
-# Basic conv layer
-class Conv(nn.Module):
-    def __init__(self, 
-                 c1,                   # in channels
-                 c2,                   # out channels 
-                 k=1,                  # kernel size 
-                 p=0,                  # padding
-                 s=1,                  # stride
-                 d=1,                  # dilation
-                 act_type='lrelu',     # activation
-                 norm_type='BN',       # normalization
-                 depthwise=False):
-        super(Conv, self).__init__()
-        convs = []
-        add_bias = False if norm_type else True
-        p = p if d == 1 else d
-        if depthwise:
-            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
-            # depthwise conv
-            if norm_type:
-                convs.append(get_norm(norm_type, c1))
-            if act_type:
-                convs.append(get_activation(act_type))
-            # pointwise conv
-            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-
-        else:
-            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-            
-        self.convs = nn.Sequential(*convs)
-
-
-    def forward(self, x):
-        return self.convs(x)
-
-
-# ---------------------------- Modified YOLOv7's Modules ----------------------------
-## ELANBlock for Backbone
-class ELANBlock(nn.Module):
-    def __init__(self, in_dim, out_dim, expand_ratio=0.5, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELANBlock, self).__init__()
-        if isinstance(expand_ratio, float):
-            inter_dim = int(in_dim * expand_ratio)
-            inter_dim2 = inter_dim
-        elif isinstance(expand_ratio, list):
-            assert len(expand_ratio) == 2
-            e1, e2 = expand_ratio
-            inter_dim = int(in_dim * e1)
-            inter_dim2 = int(inter_dim * e2)
-        # branch-1
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        # branch-2
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        # branch-3
-        for idx in range(round(3*depth)):
-            if idx == 0:
-                cv3 = [Conv(inter_dim, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
-            else:
-                cv3.append(Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-        self.cv3 = nn.Sequential(*cv3)
-        # branch-4
-        self.cv4 = nn.Sequential(*[
-            Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(round(3*depth))
-        ])
-        # output
-        self.out = Conv(inter_dim*2 + inter_dim2*2, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-
-    def forward(self, x):
-        """
-        Input:
-            x: [B, C_in, H, W]
-        Output:
-            out: [B, C_out, H, W]
-        """
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.cv3(x2)
-        x4 = self.cv4(x3)
-
-        # [B, C, H, W] -> [B, 2C, H, W]
-        out = self.out(torch.cat([x1, x2, x3, x4], dim=1))
-
-        return out
-
-## ELAN Block for FPN
-class ELANBlockFPN(nn.Module):
-    def __init__(self, in_dim, out_dim, expand_ratio :float=0.5, branch_depth :int=1, shortcut=False, act_type='silu', norm_type='BN', depthwise=False):
-        super().__init__()
-        # ----------- Basic Parameters -----------
-        self.in_dim = in_dim
-        self.out_dim = out_dim
-        self.inter_dim1 = round(out_dim * expand_ratio)
-        self.inter_dim2 = round(self.inter_dim1 * expand_ratio)
-        self.expand_ratio = expand_ratio
-        self.branch_depth = branch_depth
-        self.shortcut = shortcut
-        # ----------- Network Parameters -----------
-        ## branch-1
-        self.cv1 = Conv(in_dim, self.inter_dim1, k=1, act_type=act_type, norm_type=norm_type)
-        ## branch-2
-        self.cv2 = Conv(in_dim, self.inter_dim1, k=1, act_type=act_type, norm_type=norm_type)
-        ## branch-3
-        self.cv3 = []
-        for i in range(branch_depth):
-            if i == 0:
-                self.cv3.append(Conv(self.inter_dim1, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-            else:
-                self.cv3.append(Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-        self.cv3 = nn.Sequential(*self.cv3)
-        ## branch-4
-        self.cv4 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(branch_depth)
-        ])
-        ## branch-5
-        self.cv5 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(branch_depth)
-        ])
-        ## branch-6
-        self.cv6 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(branch_depth)
-        ])
-        ## output proj
-        self.out = Conv(self.inter_dim1*2 + self.inter_dim2*4, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.cv3(x2)
-        x4 = self.cv4(x3)
-        x5 = self.cv5(x4)
-        x6 = self.cv6(x5)
-
-        # concat all branches and fuse them into out_dim channels
-        out = self.out(torch.cat([x1, x2, x3, x4, x5, x6], dim=1))
-
-        return out
-    
-## DownSample
-class DSBlock(nn.Module):
-    def __init__(self, in_dim, out_dim, act_type='silu', norm_type='BN', depthwise=False):
-        super().__init__()
-        inter_dim = out_dim // 2
-        self.mp = nn.MaxPool2d((2, 2), 2)
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        self.cv2 = nn.Sequential(
-            Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type),
-            Conv(inter_dim, inter_dim, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-    def forward(self, x):
-        x1 = self.cv1(self.mp(x))
-        x2 = self.cv2(x)
-        out = torch.cat([x1, x2], dim=1)
-
-        return out
-
-
-# ---------------------------- FPN Modules ----------------------------
-## build fpn's core block
-def build_fpn_block(cfg, in_dim, out_dim):
-    if cfg['fpn_core_block'] == 'elanblock':
-        layer = ELANBlockFPN(in_dim        = in_dim,
-                             out_dim       = out_dim,
-                             expand_ratio  = cfg['fpn_expand_ratio'],
-                             branch_depth  = round(3 * cfg['depth']),
-                             shortcut      = False,
-                             act_type      = cfg['fpn_act'],
-                             norm_type     = cfg['fpn_norm'],
-                             depthwise     = cfg['fpn_depthwise']
-                             )
-        
-    return layer
-        
-## build fpn's reduce layer
-def build_reduce_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_reduce_layer'] == 'conv':
-        layer = Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer
-
-## build fpn's downsample layer
-def build_downsample_layer(cfg, in_dim, out_dim):
-    if cfg['fpn_downsample_layer'] == 'conv':
-        layer = Conv(in_dim, out_dim, k=3, s=2, p=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-    elif cfg['fpn_downsample_layer'] == 'dsblock':
-        layer = DSBlock(in_dim, out_dim, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-        
-    return layer
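
Note on the removed ELANBlockFPN above: the snippet below is a minimal, self-contained sketch (plain torch only, branch_depth fixed to 1, and the channel numbers are illustrative assumptions rather than config values) of the ELAN-style fusion it implements: two 1x1 branches plus a chain of 3x3 branches, all concatenated and fused by a final 1x1 conv.

import torch
import torch.nn as nn

in_dim, out_dim, expand_ratio = 256, 256, 0.5
inter1 = round(out_dim * expand_ratio)   # 128
inter2 = round(inter1 * expand_ratio)    # 64

cv1 = nn.Conv2d(in_dim, inter1, 1)                     # 1x1 branch
cv2 = nn.Conv2d(in_dim, inter1, 1)                     # 1x1 branch feeding the 3x3 chain
cv3 = nn.Conv2d(inter1, inter2, 3, padding=1)
cv4 = nn.Conv2d(inter2, inter2, 3, padding=1)
cv5 = nn.Conv2d(inter2, inter2, 3, padding=1)
cv6 = nn.Conv2d(inter2, inter2, 3, padding=1)
fuse = nn.Conv2d(inter1 * 2 + inter2 * 4, out_dim, 1)  # fuse all six branches

x = torch.randn(1, in_dim, 40, 40)
x1, x2 = cv1(x), cv2(x)
x3 = cv3(x2)
x4 = cv4(x3)
x5 = cv5(x4)
x6 = cv6(x5)
y = fuse(torch.cat([x1, x2, x3, x4, x5, x6], dim=1))
print(y.shape)  # torch.Size([1, 256, 40, 40])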

+ 0 - 160
models/detectors/rtcdet/rtcdet_head.py

@@ -1,160 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .rtcdet_basic import Conv
-except:
-    from rtcdet_basic import Conv
-
-
-# Single-level Head
-class SingleLevelHead(nn.Module):
-    def __init__(self, in_dim, cls_head_dim, reg_head_dim, num_cls_head, num_reg_head, act_type, norm_type, depthwise):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.depthwise = depthwise
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = cls_head_dim
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    Conv(in_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-            else:
-                cls_feats.append(
-                    Conv(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                        act_type=act_type,
-                        norm_type=norm_type,
-                        depthwise=depthwise)
-                        )      
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = reg_head_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    Conv(in_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-            else:
-                reg_feats.append(
-                    Conv(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                         act_type=act_type,
-                         norm_type=norm_type,
-                         depthwise=depthwise)
-                        )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-
-# Multi-level Head
-class MultiLevelHead(nn.Module):
-    def __init__(self, cfg, in_dims, num_levels=3, num_classes=80, reg_max=16):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [SingleLevelHead(
-                in_dims[level],
-                max(in_dims[0], min(num_classes, 100)), # cls head out_dim
-                max(in_dims[0]//4, 16, 4*reg_max),      # reg head out_dim
-                cfg['num_cls_head'],
-                cfg['num_reg_head'],
-                cfg['head_act'],
-                cfg['head_norm'],
-                cfg['head_depthwise'])
-                for level in range(num_levels)
-            ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-
-        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
-        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
-
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        return cls_feats, reg_feats
-    
-
-# build detection head
-def build_det_head(cfg, in_dims, num_levels=3, num_classes=80, reg_max=16):
-    if cfg['head'] == 'decoupled_head':
-        head = MultiLevelHead(cfg, in_dims, num_levels, num_classes, reg_max)
-
-    return head
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'head': 'decoupled_head',
-        'num_cls_head': 2,
-        'num_reg_head': 2,
-        'head_act': 'silu',
-        'head_norm': 'BN',
-        'head_depthwise': False,
-        'reg_max': 16,
-    }
-    fpn_dims = [256, 512, 512]
-    cls_out_dim = 256
-    reg_out_dim = 64
-    # Head-1
-    model = build_det_head(cfg, fpn_dims, num_levels=3, num_classes=80, reg_max=16)
-    print(model)
-    fpn_feats = [torch.randn(1, fpn_dims[0], 80, 80), torch.randn(1, fpn_dims[1], 40, 40), torch.randn(1, fpn_dims[2], 20, 20)]
-    t0 = time.time()
-    outputs = model(fpn_feats)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(fpn_feats, ), verbose=False)
-    print('==============================')
-    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Head-1: Params : {:.2f} M'.format(params / 1e6))

+ 0 - 104
models/detectors/rtcdet/rtcdet_neck.py

@@ -1,104 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .rtcdet_basic import Conv
-except:
-    from rtcdet_basic import Conv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code is adapted from https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio=0.5):
-        super().__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.MaxPool2d(kernel_size=cfg['pooling_size'], stride=1, padding=cfg['pooling_size'] // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-
-# SPPF block with CSP module
-class SPPFBlockCSP(nn.Module):
-    """
-        CSP Spatial Pyramid Pooling Block
-    """
-    def __init__(self, cfg, in_dim, out_dim, expand_ratio):
-        super(SPPFBlockCSP, self).__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-        self.m = nn.Sequential(
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise']),
-            SPPF(cfg, inter_dim, inter_dim, expand_ratio=1.0),
-            Conv(inter_dim, inter_dim, k=3, p=1, 
-                 act_type=cfg['neck_act'], norm_type=cfg['neck_norm'], 
-                 depthwise=cfg['neck_depthwise'])
-        )
-        self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=cfg['neck_act'], norm_type=cfg['neck_norm'])
-
-        
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.m(x2)
-        y = self.cv3(torch.cat([x1, x3], dim=1))
-
-        return y
-
-
-def build_neck(cfg, in_dim, out_dim):
-    model = cfg['neck']
-    print('==============================')
-    print('Neck: {}'.format(model))
-    # build neck
-    if model == 'sppf':
-        neck = SPPF(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-    elif model == 'csp_sppf':
-        neck = SPPFBlockCSP(cfg, in_dim, out_dim, cfg['neck_expand_ratio'])
-
-    return neck
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        ## Neck: SPP
-        'neck': 'csp_sppf',
-        'neck_expand_ratio': 0.5,
-        'pooling_size': 5,
-        'neck_act': 'silu',
-        'neck_norm': 'BN',
-        'neck_depthwise': False,
-    }
-    in_dim = 1024
-    out_dim = 512
-    # Head-1
-    model = build_neck(cfg, in_dim, out_dim)
-    feat = torch.randn(1, in_dim, 20, 20)
-    t0 = time.time()
-    outputs = model(feat)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(feat, ), verbose=False)
-    print('==============================')
-    print('FPN: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('FPN: Params : {:.2f} M'.format(params / 1e6))
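
Note on the removed SPPF above: the standalone check below (torch only; the tensor size is arbitrary) shows why chaining a single 5x5 max-pool is enough: two and three successive 5x5 pools with stride 1 are exactly equivalent to single 9x9 and 13x13 pools, i.e. the classic SPP pyramid that SPPF reproduces more cheaply by concatenating x, m(x), m(m(x)) and m(m(m(x))).

import torch
import torch.nn as nn

x = torch.randn(1, 8, 20, 20)
m5  = nn.MaxPool2d(5,  stride=1, padding=2)
m9  = nn.MaxPool2d(9,  stride=1, padding=4)
m13 = nn.MaxPool2d(13, stride=1, padding=6)

y1 = m5(x)    # 5x5 receptive field
y2 = m5(y1)   # equals a single 9x9 pool
y3 = m5(y2)   # equals a single 13x13 pool
print(torch.allclose(y2, m9(x)), torch.allclose(y3, m13(x)))  # True True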

+ 0 - 137
models/detectors/rtcdet/rtcdet_pafpn.py

@@ -1,137 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-try:
-    from .rtcdet_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
-except:
-    from rtcdet_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
-
-
-# RTCDet-Style PaFPN
-class RTCDetPaFPN(nn.Module):
-    def __init__(self, cfg, in_dims=[512, 1024, 512], out_dim=None):
-        super(RTCDetPaFPN, self).__init__()
-        # --------------------------- Basic Parameters ---------------------------
-        self.in_dims = in_dims
-                
-        # --------------------------- Top-down FPN ---------------------------
-        ## P5 -> P4
-        self.reduce_layer_1 = build_reduce_layer(cfg, in_dims[2], round(512*cfg['width']))
-        self.reduce_layer_2 = build_reduce_layer(cfg, in_dims[1], round(512*cfg['width']))
-        self.top_down_layer_1 = build_fpn_block(cfg, round(512*cfg['width']) + round(512*cfg['width']), round(512*cfg['width']))
-
-        ## P4 -> P3
-        self.reduce_layer_3 = build_reduce_layer(cfg, round(512*cfg['width']), round(256*cfg['width']))
-        self.reduce_layer_4 = build_reduce_layer(cfg, in_dims[0], round(256*cfg['width']))
-        self.top_down_layer_2 = build_fpn_block(cfg, round(256*cfg['width']) + round(256*cfg['width']), round(256*cfg['width']))
-
-        # --------------------------- Bottom-up FPN ---------------------------
-        ## P3 -> P4
-        self.downsample_layer_1 = build_downsample_layer(cfg, round(256*cfg['width']), round(256*cfg['width']))
-        self.bottom_up_layer_1 = build_fpn_block(cfg, round(256*cfg['width']) + round(256*cfg['width']), round(512*cfg['width']))
-
-        ## P4 -> P5
-        self.downsample_layer_2 = build_downsample_layer(cfg, round(512*cfg['width']), round(512*cfg['width']))
-        self.bottom_up_layer_2 = build_fpn_block(cfg, round(512*cfg['width']) + round(512*cfg['width']), round(1024*cfg['width']))
-                        
-        # --------------------------- Output proj ---------------------------
-        if out_dim is not None:
-            self.out_layers = nn.ModuleList([
-                Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
-                for in_dim in [round(256*cfg['width']), round(512*cfg['width']), round(1024*cfg['width'])]])
-            self.out_dim = [out_dim] * 3
-        else:
-            self.out_layers = None
-            self.out_dim = [round(256*cfg['width']), round(512*cfg['width']), round(1024*cfg['width'])]
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-
-    def forward(self, fpn_feats):
-        c3, c4, c5 = fpn_feats
-
-        # Top down
-        ## P5 -> P4
-        c6 = self.reduce_layer_1(c5)
-        c7 = self.reduce_layer_2(c4)
-        c8 = torch.cat([F.interpolate(c6, scale_factor=2.0), c7], dim=1)
-        c9 = self.top_down_layer_1(c8)
-        ## P4 -> P3
-        c10 = self.reduce_layer_3(c9)
-        c11 = self.reduce_layer_4(c3)
-        c12 = torch.cat([F.interpolate(c10, scale_factor=2.0), c11], dim=1)
-        c13 = self.top_down_layer_2(c12)
-
-        # Bottom up
-        # p3 -> P4
-        c14 = self.downsample_layer_1(c13)
-        c15 = torch.cat([c14, c10], dim=1)
-        c16 = self.bottom_up_layer_1(c15)
-        # P4 -> P5
-        c17 = self.downsample_layer_2(c16)
-        c18 = torch.cat([c17, c6], dim=1)
-        c19 = self.bottom_up_layer_2(c18)
-
-        out_feats = [c13, c16, c19] # [P3, P4, P5]
-        
-        # output proj layers
-        if self.out_layers is not None:
-            out_feats_proj = []
-            for feat, layer in zip(out_feats, self.out_layers):
-                out_feats_proj.append(layer(feat))
-            return out_feats_proj
-
-        return out_feats
-
-
-def build_fpn(cfg, in_dims, out_dim=None):
-    model = cfg['fpn']
-    # build pafpn
-    if model == 'rtcdet_pafpn':
-        fpn_net = RTCDetPaFPN(cfg, in_dims, out_dim)
-
-    return fpn_net
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'width': 1.0,
-        'depth': 1.0,
-        'fpn': 'rtcdet_pafpn',
-        'fpn_reduce_layer': 'conv',
-        'fpn_downsample_layer': 'conv',
-        'fpn_core_block': 'elanblock',
-        'fpn_branch_depth': 3,
-        'fpn_expand_ratio': 0.5,
-        'fpn_act': 'silu',
-        'fpn_norm': 'BN',
-        'fpn_depthwise': False,
-    }
-    fpn_dims = [512, 1024, 512]
-    out_dim = 256
-    # Head-1
-    model = build_fpn(cfg, fpn_dims, out_dim)
-    fpn_feats = [torch.randn(1, fpn_dims[0], 80, 80), torch.randn(1, fpn_dims[1], 40, 40), torch.randn(1, fpn_dims[2], 20, 20)]
-    t0 = time.time()
-    outputs = model(fpn_feats)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(fpn_feats, ), verbose=False)
-    print('==============================')
-    print('FPN: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('FPN: Params : {:.2f} M'.format(params / 1e6))

+ 0 - 149
models/detectors/rtcdet/rtcdet_pred.py

@@ -1,149 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-# Single-level pred layer
-class SingleLevelPredLayer(nn.Module):
-    def __init__(self, cls_dim, reg_dim, stride, num_classes, num_coords=4):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.stride = stride
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-
-        # --------- Network Parameters ----------
-        self.cls_pred = nn.Conv2d(cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(reg_dim, num_coords, kernel_size=1)                
-
-        self.init_bias()
-        
-    def init_bias(self):
-        # cls pred bias
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(math.log(5 / self.num_classes / (640. / self.stride) ** 2))
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred bias
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-    def forward(self, cls_feat, reg_feat):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        return cls_pred, reg_pred
-    
-
-# Multi-level pred layer
-class MultiLevelPredLayer(nn.Module):
-    def __init__(self, cls_dim, reg_dim, strides, num_classes, num_coords=4, num_levels=3, reg_max=16):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.strides = strides
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-        self.num_levels = num_levels
-        self.reg_max = reg_max
-
-        # ----------- Network Parameters -----------
-        ## pred layers
-        self.multi_level_preds = nn.ModuleList(
-            [SingleLevelPredLayer(
-                cls_dim,
-                reg_dim,
-                strides[l],
-                num_classes,
-                num_coords * self.reg_max)
-                for l in range(num_levels)
-            ])
-        ## proj conv
-        proj_init = torch.arange(reg_max, dtype=torch.float)
-        self.proj_conv = nn.Conv2d(self.reg_max, 1, kernel_size=1, bias=False).requires_grad_(False)
-        self.proj_conv.weight.data[:] = nn.Parameter(proj_init.view([1, reg_max, 1, 1]))
-
-    def generate_anchors(self, level, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        # generate grid cells
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors += 0.5  # add center offset
-        anchors *= self.strides[level]
-
-        return anchors
-        
-    def forward(self, cls_feats, reg_feats):
-        all_anchors = []
-        all_strides = []
-        all_cls_preds = []
-        all_reg_preds = []
-        all_box_preds = []
-        all_delta_preds = []
-        for level in range(self.num_levels):
-            # pred
-            cls_pred, reg_pred = self.multi_level_preds[level](cls_feats[level], reg_feats[level])
-
-            # generate anchor boxes: [M, 4]
-            B, _, H, W = cls_pred.size()
-            fmp_size = [H, W]
-            anchors = self.generate_anchors(level, fmp_size)
-            anchors = anchors.to(cls_pred.device)
-            # stride tensor: [M, 1]
-            stride_tensor = torch.ones_like(anchors[..., :1]) * self.strides[level]
-            
-            # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
-            cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-            reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4*self.reg_max)
-
-            # ----------------------- Decode bbox -----------------------
-            B, M = reg_pred.shape[:2]
-            # [B, M, 4*(reg_max)] -> [B, M, 4, reg_max]
-            delta_pred = reg_pred.reshape([B, M, 4, self.reg_max])
-            # [B, M, 4, reg_max] -> [B, reg_max, 4, M]
-            delta_pred = delta_pred.permute(0, 3, 2, 1).contiguous()
-            # [B, reg_max, 4, M] -> [B, 1, 4, M]
-            delta_pred = self.proj_conv(F.softmax(delta_pred, dim=1))
-            # [B, 1, 4, M] -> [B, 4, M] -> [B, M, 4]
-            delta_pred = delta_pred.view(B, 4, M).permute(0, 2, 1).contiguous()
-            ## tlbr -> xyxy
-            x1y1_pred = anchors[None] - delta_pred[..., :2] * self.strides[level]
-            x2y2_pred = anchors[None] + delta_pred[..., 2:] * self.strides[level]
-            box_pred = torch.cat([x1y1_pred, x2y2_pred], dim=-1)
-
-            all_cls_preds.append(cls_pred)
-            all_reg_preds.append(reg_pred)
-            all_box_preds.append(box_pred)
-            all_delta_preds.append(delta_pred)
-            all_anchors.append(anchors)
-            all_strides.append(stride_tensor)
-        
-        # output dict
-        outputs = {"pred_cls": all_cls_preds,        # List(Tensor) [B, M, C]
-                   "pred_reg": all_reg_preds,        # List(Tensor) [B, M, 4*(reg_max)]
-                   "pred_box": all_box_preds,        # List(Tensor) [B, M, 4]
-                   "pred_delta": all_delta_preds,    # List(Tensor) [B, M, 4]
-                   "anchors": all_anchors,           # List(Tensor) [M, 2]
-                   "strides": self.strides,          # List(Int) = [8, 16, 32]
-                   "stride_tensor": all_strides      # List(Tensor) [M, 1]
-                   }
-
-        return outputs
-    
-
-# build detection head
-def build_pred_layer(cls_dim, reg_dim, strides, num_classes, num_coords=4, num_levels=3, reg_max=16):
-    pred_layers = MultiLevelPredLayer(cls_dim, reg_dim, strides, num_classes, num_coords, num_levels, reg_max) 
-
-    return pred_layers
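
Note on the removed MultiLevelPredLayer above: the sketch below (torch only; the batch size, anchor values and stride are made-up numbers) reproduces its distribution-focal decode without the fixed 1x1 proj_conv, taking an explicit expectation over the reg_max bins and then converting the per-side distances back to xyxy boxes around the anchor centers.

import torch
import torch.nn.functional as F

reg_max, B, M, stride = 16, 2, 5, 8
reg_pred = torch.randn(B, M, 4, reg_max)              # raw logits: 4 sides x reg_max bins
proj = torch.arange(reg_max, dtype=torch.float32)     # bin values 0 .. reg_max-1

# expectation over the softmax distribution, the same quantity proj_conv computes
delta = (F.softmax(reg_pred, dim=-1) * proj).sum(-1)  # [B, M, 4], distances in stride units

anchors = torch.rand(M, 2) * 640                      # hypothetical anchor centers (pixels)
x1y1 = anchors[None] - delta[..., :2] * stride
x2y2 = anchors[None] + delta[..., 2:] * stride
boxes = torch.cat([x1y1, x2y2], dim=-1)               # [B, M, 4] in xyxy
print(boxes.shape)                                    # torch.Size([2, 5, 4])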

+ 0 - 31
models/detectors/rtrdet/build.py

@@ -1,31 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-import torch
-import torch.nn as nn
-
-from .loss import build_criterion
-from .rtrdet import RTRDet
-
-
-# build object detector
-def build_rtrdet(args, cfg, device, num_classes=80, trainable=False, deploy=False):
-    print('==============================')
-    print('Build {} ...'.format(args.model.upper()))
-        
-    # -------------- Build RTRDet --------------
-    model = RTRDet(cfg         = cfg,
-                   device      = device, 
-                   num_classes = num_classes,
-                   trainable   = trainable,
-                   aux_loss    = True if trainable else False,
-                   deploy      = deploy
-                   )
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if trainable:
-        # build criterion for training
-        criterion = build_criterion(cfg, num_classes, aux_loss=True)
-
-    return model, criterion

+ 0 - 165
models/detectors/rtrdet/loss.py

@@ -1,165 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .matcher import build_matcher
-from utils.misc import sigmoid_focal_loss
-from utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
-from utils.distributed_utils import is_dist_avail_and_initialized, get_world_size
-
-
-class Criterion(nn.Module):
-    """ This class computes the loss for DETR.
-    The process happens in two steps:
-        1) we compute hungarian assignment between ground truth boxes and the outputs of the model
-        2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
-    """
-    def __init__(self, num_classes, matcher, weight_dict, losses, focal_alpha=0.25):
-        """ Create the criterion.
-        Parameters:
-            num_classes: number of object categories, omitting the special no-object category
-            matcher: module able to compute a matching between targets and proposals
-            weight_dict: dict containing as key the names of the losses and as values their relative weight.
-            focal_alpha: alpha parameter of the focal loss applied to the classification term
-            losses: list of all the losses to be applied. See get_loss for list of available losses.
-        """
-        super().__init__()
-        self.num_classes = num_classes
-        self.matcher = matcher
-        self.weight_dict = weight_dict
-        self.losses = losses
-        self.focal_alpha = focal_alpha
-
-
-    def _get_src_permutation_idx(self, indices):
-        # permute predictions following indices
-        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
-        src_idx = torch.cat([src for (src, _) in indices])
-        return batch_idx, src_idx
-
-
-    def _get_tgt_permutation_idx(self, indices):
-        # permute targets following indices
-        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
-        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
-        return batch_idx, tgt_idx
-
-
-    def loss_labels(self, outputs, targets, indices, num_boxes):
-        """Classification loss (sigmoid focal loss)
-        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
-        """
-        assert 'pred_logits' in outputs
-        src_logits = outputs['pred_logits']
-
-        idx = self._get_src_permutation_idx(indices)
-        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]).to(src_logits.device)
-        target_classes = torch.full(src_logits.shape[:2], self.num_classes,
-                                    dtype=torch.int64, device=src_logits.device)
-        target_classes[idx] = target_classes_o
-
-        target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],
-                                            dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)
-        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
-
-        target_classes_onehot = target_classes_onehot[:, :, :-1]
-        loss_cls = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * \
-                  src_logits.shape[1]
-        losses = {'loss_cls': loss_cls}
-
-        return losses
-
-
-    def loss_boxes(self, outputs, targets, indices, num_boxes):
-        """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
-           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
-           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
-        """
-        assert 'pred_boxes' in outputs
-        idx = self._get_src_permutation_idx(indices)
-        src_boxes = outputs['pred_boxes'][idx]
-        target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0).to(src_boxes.device)
-
-        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
-
-        losses = {}
-        losses['loss_box'] = loss_bbox.sum() / num_boxes
-
-        loss_giou = 1 - torch.diag(generalized_box_iou(
-            box_cxcywh_to_xyxy(src_boxes),
-            box_cxcywh_to_xyxy(target_boxes)))
-        losses['loss_giou'] = loss_giou.sum() / num_boxes
-        return losses
-
-
-    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
-        loss_map = {
-            'labels': self.loss_labels,
-            'boxes': self.loss_boxes,
-        }
-        assert loss in loss_map, f'do you really want to compute {loss} loss?'
-        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
-
-
-    def forward(self, outputs, targets, epoch=0):
-        """ This performs the loss computation.
-        Parameters:
-             outputs: dict of tensors, see the output specification of the model for the format
-             targets: list of dicts, such that len(targets) == batch_size.
-                      The expected keys in each dict depends on the losses applied, see each loss' doc
-        """
-        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
-
-        # Retrieve the matching between the outputs of the last layer and the targets
-        indices = self.matcher(outputs_without_aux, targets)
-
-        # Compute the average number of target boxes across all nodes, for normalization purposes
-        num_boxes = sum(len(t["labels"]) for t in targets)
-        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_boxes)
-        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
-
-        # Compute all the requested losses
-        losses = {}
-        for loss in self.losses:
-            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
-
-        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
-        if 'aux_outputs' in outputs:
-            for i, aux_outputs in enumerate(outputs['aux_outputs']):
-                indices = self.matcher(aux_outputs, targets)
-                for loss in self.losses:
-                    kwargs = {}
-                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
-                    l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
-                    losses.update(l_dict)
-
-        # compute total losses
-        total_loss = sum(losses[k] * self.weight_dict[k] for k in losses.keys() if k in self.weight_dict)
-        losses['losses'] = total_loss
-
-        return losses
-
-
-# build criterion
-def build_criterion(cfg, num_classes, aux_loss=False):
-    # build matcher
-    matcher_type = cfg['matcher']
-    matcher = build_matcher(cfg)
-    
-    # build criterion
-    weight_dict = {'loss_cls':  cfg['loss_weights'][matcher_type]['loss_cls_weight'],
-                   'loss_box':  cfg['loss_weights'][matcher_type]['loss_box_weight'],
-                   'loss_giou': cfg['loss_weights'][matcher_type]['loss_giou_weight']}
-
-    if aux_loss:
-        aux_weight_dict = {}
-        for i in range(cfg['num_decoder'] - 1):
-            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
-        weight_dict.update(aux_weight_dict)
-    losses = ['labels', 'boxes']
-    criterion = Criterion(num_classes, matcher, weight_dict, losses)
-
-    return criterion
-    
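
Note on the removed build_criterion above: the loop below shows, on placeholder weights (num_decoder = 3 is also an assumption, not a repo value), how the aux-loss expansion duplicates each loss weight once per intermediate decoder layer, so that every '_{i}'-suffixed key produced by the Criterion's aux branch is covered by weight_dict.

weight_dict = {'loss_cls': 1.0, 'loss_box': 5.0, 'loss_giou': 2.0}   # placeholder weights
num_decoder = 3
aux_weight_dict = {}
for i in range(num_decoder - 1):
    aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
print(sorted(weight_dict))
# ['loss_box', 'loss_box_0', 'loss_box_1', 'loss_cls', 'loss_cls_0',
#  'loss_cls_1', 'loss_giou', 'loss_giou_0', 'loss_giou_1']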

+ 0 - 102
models/detectors/rtrdet/matcher.py

@@ -1,102 +0,0 @@
-import torch
-import torch.nn as nn
-from scipy.optimize import linear_sum_assignment
-from utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou
-
-
-class HungarianMatcher(nn.Module):
-    """This class computes an assignment between the targets and the predictions of the network
-    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
-    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
-    while the others are un-matched (and thus treated as non-objects).
-    """
-
-    def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
-        """Creates the matcher
-        Params:
-            cost_class: This is the relative weight of the classification error in the matching cost
-            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
-            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
-        """
-        super().__init__()
-        self.cost_class = cost_class
-        self.cost_bbox = cost_bbox
-        self.cost_giou = cost_giou
-        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"
-
-
-    @torch.no_grad()
-    def forward(self, outputs, targets):
-        """ Performs the matching
-        Params:
-            outputs: This is a dict that contains at least these entries:
-                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
-                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
-            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
-                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
-                           objects in the target) containing the class labels
-                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
-        Returns:
-            A list of size batch_size, containing tuples of (index_i, index_j) where:
-                - index_i is the indices of the selected predictions (in order)
-                - index_j is the indices of the corresponding selected targets (in order)
-            For each batch element, it holds:
-                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
-        """
-        bs, num_queries = outputs["pred_logits"].shape[:2]
-
-        # We flatten to compute the cost matrices in a batch
-        # [B * num_queries, C] = [N, C], where N is B * num_queries
-        out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid()
-        # [B * num_queries, 4] = [N, 4]
-        out_bbox = outputs["pred_boxes"].flatten(0, 1)
-
-        # Also concat the target labels and boxes
-        # [M,] where M is number of all targets in this batch
-        tgt_ids = torch.cat([v["labels"] for v in targets])
-        # [M, 4] where M is number of all targets in this batch
-        tgt_bbox = torch.cat([v["boxes"] for v in targets])
-
-        # Compute the classification cost.
-        alpha = 0.25
-        gamma = 2.0
-        neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
-        pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
-        cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
-
-        # Compute the L1 cost between boxes
-        # [N, M]
-        cost_bbox = torch.cdist(out_bbox, tgt_bbox.to(out_bbox.device), p=1)
-
-        # Compute the giou cost between boxes
-        # [N, M]
-        cost_giou = -generalized_box_iou(
-            box_cxcywh_to_xyxy(out_bbox),
-            box_cxcywh_to_xyxy(tgt_bbox.to(out_bbox.device)))
-
-        # Final cost matrix: [N, M]
-        C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
-        # [N, M] -> [B, num_queries, M]
-        C = C.view(bs, num_queries, -1).cpu()
-
-        # The number of boxes in each image
-        sizes = [len(v["boxes"]) for v in targets]
-        # Split the last dimension of C into per-image chunks: each chunk c has
-        # shape [B, num_queries, M_i], where sum(M_i) = M, and c[i] is the cost
-        # matrix between the i-th image's predictions and its own targets.
-        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
-        # Each (i, j) in indices holds the matched prediction indices (rows of the
-        # cost matrix) and target indices (columns), with len(i) == len(j) and
-        # len(indices) == batch_size.
-        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
-
-
-def build_matcher(cfg):
-    matcher_type = cfg['matcher']
-    if matcher_type == 'hungarian_matcher':
-        return HungarianMatcher(cfg['matcher_hpy'][matcher_type]['cost_cls_weight'],
-                                cfg['matcher_hpy'][matcher_type]['cost_box_weight'],
-                                cfg['matcher_hpy'][matcher_type]['cost_giou_weight'])
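
Note on the removed HungarianMatcher above: the tiny standalone example below (numpy/scipy only, with an arbitrary 3x3 cost matrix) shows the assignment step it runs per image; rows are predictions, columns are targets, and linear_sum_assignment returns the row/column index pairs with minimal total cost, which the matcher then converts to torch tensors.

import numpy as np
from scipy.optimize import linear_sum_assignment

cost = np.array([[0.90, 0.10, 0.80],
                 [0.20, 0.70, 0.60],
                 [0.50, 0.40, 0.05]])
row_ind, col_ind = linear_sum_assignment(cost)
print(row_ind, col_ind)                  # [0 1 2] [1 0 2]
print(cost[row_ind, col_ind].sum())      # 0.35, the minimal total cost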

+ 0 - 115
models/detectors/rtrdet/rtrdet.py

@@ -1,115 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .rtrdet_backbone import build_backbone
-from .rtrdet_transformer import build_transformer
-
-
-# Real-time Detection with Transformer
-class RTRDet(nn.Module):
-    def __init__(self, 
-                 cfg,
-                 device, 
-                 num_classes :int = 20, 
-                 trainable   :bool = False, 
-                 aux_loss    :bool = False,
-                 deploy      :bool = False):
-        super(RTRDet, self).__init__()
-        assert cfg['out_stride'] == 16 or cfg['out_stride'] == 32
-        # ------------------ Basic parameters ------------------
-        self.cfg = cfg
-        self.device = device
-        self.out_stride = cfg['out_stride']
-        self.max_stride = cfg['max_stride']
-        self.num_levels = 2 if cfg['out_stride'] == 16 else 1
-        self.num_topk = cfg['num_topk']
-        self.num_classes = num_classes
-        self.d_model = round(cfg['d_model'] * cfg['width'])
-        self.aux_loss = aux_loss
-        self.trainable = trainable
-        self.deploy = deploy
-        
-        # ------------------ Network parameters ------------------
-        ## Backbone
-        self.backbone, self.feat_dims = build_backbone(cfg, trainable&cfg['pretrained'])
-        self.input_projs = nn.ModuleList(nn.Conv2d(self.feat_dims[-i], self.d_model, kernel_size=1) for i in range(1, self.num_levels+1))
-        
-        ## Transformer
-        self.transformer = build_transformer(cfg, num_classes, return_intermediate=aux_loss)
-
-
-    @torch.jit.unused
-    def set_aux_loss(self, outputs_class, outputs_coord):
-        # this is a workaround to make torchscript happy, as torchscript
-        # doesn't support dictionary with non-homogeneous values, such
-        # as a dict having both a Tensor and a list.
-        return [{'pred_logits': a, 'pred_boxes': b}
-                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
-
-
-    # ---------------------- Main Process for Inference ----------------------
-    @torch.no_grad()
-    def inference_single_image(self, x):
-        # -------------------- Inference --------------------
-        ## Backbone
-        pyramid_feats = self.backbone(x)
-        
-        ## Input proj
-        for idx in range(1, self.num_levels + 1):
-            pyramid_feats[-idx] = self.input_projs[idx-1](pyramid_feats[-idx])
-
-        ## Transformer
-        if self.num_levels == 2:
-            src1, src2 = pyramid_feats[-2], pyramid_feats[-1]
-        else:
-            src1, src2 = None, pyramid_feats[-1]
-        output_classes, output_coords = self.transformer(src1, src2)
-
-        # -------------------- Post-process --------------------
-        ## Top-k
-        cls_pred, box_pred = output_classes[-1], output_coords[-1]
-        cls_pred = cls_pred[0].flatten().sigmoid_()
-        box_pred = box_pred[0]
-        predicted_prob, topk_idxs = cls_pred.sort(descending=True)
-        topk_idxs = topk_idxs[:self.num_topk]
-        topk_box_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-        topk_scores = predicted_prob[:self.num_topk]
-        topk_labels = topk_idxs % self.num_classes
-        topk_bboxes = box_pred[topk_box_idxs]
-        ## Denormalize bbox
-        img_h, img_w = x.shape[-2:]
-        topk_bboxes[..., 0::2] *= img_w
-        topk_bboxes[..., 1::2] *= img_h
-
-        if self.deploy:
-            return topk_bboxes, topk_scores, topk_labels
-        else:
-            return topk_bboxes.cpu().numpy(), topk_scores.cpu().numpy(), topk_labels.cpu().numpy()
-        
-
-    # ---------------------- Main Process for Training ----------------------
-    def forward(self, x):
-        if not self.trainable:
-            return self.inference_single_image(x)
-        else:
-            # -------------------- Inference --------------------
-            ## Backbone
-            pyramid_feats = self.backbone(x)
-            
-            ## Input proj
-            for idx in range(1, self.num_levels + 1):
-                pyramid_feats[-idx] = self.input_projs[idx-1](pyramid_feats[-idx])
-
-            ## Transformer
-            if self.num_levels == 2:
-                src1, src2 = pyramid_feats[-2], pyramid_feats[-1]
-            else:
-                src1, src2 = None, pyramid_feats[-1]
-            output_classes, output_coords = self.transformer(src1, src2)
-
-            outputs = {'pred_logits': output_classes[-1], 'pred_boxes': output_coords[-1]}
-            if self.aux_loss:
-                outputs['aux_outputs'] = self.set_aux_loss(output_classes, output_coords)
-            
-            return outputs
-    
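
Note on the removed RTRDet.inference_single_image above: the snippet below is a standalone sketch (torch only; num_queries, num_classes and num_topk are made-up) of its top-k post-processing, where scores are flattened over queries x classes and each selected index is split back into a box index and a class label.

import torch

num_queries, num_classes, num_topk = 10, 4, 5
cls_pred = torch.randn(num_queries, num_classes).flatten().sigmoid()
box_pred = torch.rand(num_queries, 4)                 # normalized boxes, one per query

scores, topk_idxs = cls_pred.sort(descending=True)
topk_idxs = topk_idxs[:num_topk]
topk_box_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')
topk_labels = topk_idxs % num_classes
topk_boxes = box_pred[topk_box_idxs]
print(scores[:num_topk].shape, topk_labels.shape, topk_boxes.shape)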

+ 0 - 157
models/detectors/rtrdet/rtrdet_backbone.py

@@ -1,157 +0,0 @@
-import torch
-import torch.nn as nn
-try:
-    from .rtrdet_basic import Conv, ELANBlock, DSBlock
-except:
-    from rtrdet_basic import Conv, ELANBlock, DSBlock
-
-
-model_urls = {
-    'elannet_pico':   "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_pico.pth",
-    'elannet_nano':   "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_nano.pth",
-    'elannet_tiny':   "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_tiny.pth",
-    'elannet_small':  "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_small.pth",
-    'elannet_medium': "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_medium.pth",
-    'elannet_large':  "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_large.pth",
-    'elannet_huge':   "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/elannet_huge.pth",
-}
-
-
-# ---------------------------- Backbones ----------------------------
-# ELANNet-P5
-class ELANNet(nn.Module):
-    def __init__(self, width=1.0, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELANNet, self).__init__()
-        # ------------------ Basic parameters ------------------
-        self.width = width
-        self.depth = depth
-        self.expand_ratios = [0.5, 0.5, 0.5, 0.25]
-        self.feat_dims = [round(64*width), round(128*width), round(256*width), round(512*width), round(1024*width), round(1024*width)]
-        
-        # ------------------ Network parameters ------------------
-        ## P1/2
-        self.layer_1 = nn.Sequential(
-            Conv(3, self.feat_dims[0], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type),
-            Conv(self.feat_dims[0], self.feat_dims[0], k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        ## P2/4
-        self.layer_2 = nn.Sequential(   
-            Conv(self.feat_dims[0], self.feat_dims[1], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),             
-            ELANBlock(self.feat_dims[1], self.feat_dims[2], self.expand_ratios[0], self.depth, act_type, norm_type, depthwise)
-        )
-        ## P3/8
-        self.layer_3 = nn.Sequential(
-            DSBlock(self.feat_dims[2], self.feat_dims[2], act_type, norm_type, depthwise),             
-            ELANBlock(self.feat_dims[2], self.feat_dims[3], self.expand_ratios[1], self.depth, act_type, norm_type, depthwise)
-        )
-        ## P4/16
-        self.layer_4 = nn.Sequential(
-            DSBlock(self.feat_dims[3], self.feat_dims[3], act_type, norm_type, depthwise),             
-            ELANBlock(self.feat_dims[3], self.feat_dims[4], self.expand_ratios[2], self.depth, act_type, norm_type, depthwise)
-        )
-        ## P5/32
-        self.layer_5 = nn.Sequential(
-            DSBlock(self.feat_dims[4], self.feat_dims[4], act_type, norm_type, depthwise),             
-            ELANBlock(self.feat_dims[4], self.feat_dims[5], self.expand_ratios[3], self.depth, act_type, norm_type, depthwise)
-        )
-
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-# ---------------------------- Functions ----------------------------
-## load pretrained weight
-def load_weight(model, model_name):
-    # load weight
-    print('Loading pretrained weight ...')
-    url = model_urls[model_name]
-    if url is not None:
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-                    print(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print(k)
-
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No pretrained for {}'.format(model_name))
-
-    return model
-
-## build ELAN-Net
-def build_backbone(cfg, pretrained=False): 
-    # model
-    backbone = ELANNet(
-        width=cfg['width'],
-        depth=cfg['depth'],
-        act_type=cfg['bk_act'],
-        norm_type=cfg['bk_norm'],
-        depthwise=cfg['bk_depthwise']
-        )
-    # check whether to load imagenet pretrained weight
-    if pretrained:
-        if cfg['width'] == 0.25 and cfg['depth'] == 0.34 and cfg['bk_depthwise']:
-            backbone = load_weight(backbone, model_name='elannet_pico')
-        elif cfg['width'] == 0.25 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_nano')
-        elif cfg['width'] == 0.375 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_tiny')
-        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='elannet_small')
-        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
-            backbone = load_weight(backbone, model_name='elannet_medium')
-        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
-            backbone = load_weight(backbone, model_name='elannet_large')
-        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
-            backbone = load_weight(backbone, model_name='elannet_huge')
-    feat_dims = backbone.feat_dims[-3:]
-
-    return backbone, feat_dims
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'pretrained': True,
-        'bk_act': 'silu',
-        'bk_norm': 'BN',
-        'bk_depthwise': False,
-        'width': 1.0,
-        'depth': 1.0,
-    }
-    model, feats = build_backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

+ 0 - 384
models/detectors/rtrdet/rtrdet_basic.py

@@ -1,384 +0,0 @@
-import copy
-import torch
-import torch.nn as nn
-from typing import Optional
-from torch import Tensor
-
-
-# ---------------------------- Basic functions ----------------------------
-class SiLU(nn.Module):
-    """export-friendly version of nn.SiLU()"""
-
-    @staticmethod
-    def forward(x):
-        return x * torch.sigmoid(x)
-
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-
-def get_clones(module, N):
-    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
-    
-
-# ---------------------------- 2D CNN ----------------------------
-class Conv(nn.Module):
-    def __init__(self, 
-                 c1,                   # in channels
-                 c2,                   # out channels 
-                 k=1,                  # kernel size 
-                 p=0,                  # padding
-                 s=1,                  # stride
-                 d=1,                  # dilation
-                 act_type='lrelu',     # activation
-                 norm_type='BN',       # normalization
-                 depthwise=False):
-        super(Conv, self).__init__()
-        convs = []
-        add_bias = False if norm_type else True
-        p = p if d == 1 else d
-        if depthwise:
-            # depthwise conv
-            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c1))
-            if act_type:
-                convs.append(get_activation(act_type))
-            # pointwise conv
-            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-
-        else:
-            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-            
-        self.convs = nn.Sequential(*convs)
-
-
-    def forward(self, x):
-        return self.convs(x)
-
-
-# ------------------------------- MLP -------------------------------
-class MLP(nn.Module):
-    """ Very simple multi-layer perceptron (also called FFN)"""
-
-    def __init__(self, in_dim, hidden_dim, out_dim, num_layers):
-        super().__init__()
-        self.num_layers = num_layers
-        h = [hidden_dim] * (num_layers - 1)
-        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([in_dim] + h, h + [out_dim]))
-
-    def forward(self, x):
-        for i, layer in enumerate(self.layers):
-            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
-        return x
-
-class FFN(nn.Module):
-    def __init__(self, d_model=256, mlp_ratio=4.0, dropout=0., act_type='relu'):
-        super().__init__()
-        self.fpn_dim = round(d_model * mlp_ratio)
-        self.linear1 = nn.Linear(d_model, self.fpn_dim)
-        self.activation = get_activation(act_type)
-        self.dropout2 = nn.Dropout(dropout)
-        self.linear2 = nn.Linear(self.fpn_dim, d_model)
-        self.dropout3 = nn.Dropout(dropout)
-        self.norm2 = nn.LayerNorm(d_model)
-
-    def forward(self, src):
-        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
-        src = src + self.dropout3(src2)
-        src = self.norm2(src)
-        return src
-    
-# ---------------------------- Attention ----------------------------
-class MultiHeadAttention(nn.Module):
-    def __init__(self, d_model, num_heads, dropout=0.) -> None:
-        super().__init__()
-        # --------------- Basic parameters ---------------
-        self.d_model = d_model
-        self.num_heads = num_heads
-        self.dropout = dropout
-        self.scale = (d_model // num_heads) ** -0.5
-
-        # --------------- Network parameters ---------------
-        self.q_proj = nn.Linear(d_model, d_model, bias=False)  # W_q
-        self.k_proj = nn.Linear(d_model, d_model, bias=False)  # W_k
-        self.v_proj = nn.Linear(d_model, d_model, bias=False)  # W_v
-
-        self.out_proj = nn.Linear(d_model, d_model)
-        self.dropout = nn.Dropout(dropout)
-
-
-    def forward(self, query, key, value):
-        """
-        Inputs:
-            query : (Tensor) -> [B, Nq, C]
-            key   : (Tensor) -> [B, Nk, C]
-            value : (Tensor) -> [B, Nk, C]
-        """
-        bs = query.shape[0]
-        Nq = query.shape[1]
-        Nk = key.shape[1]
-
-        # ----------------- Input proj -----------------
-        query = self.q_proj(query)
-        key   = self.k_proj(key)
-        value = self.v_proj(value)
-
-        # ----------------- Multi-head Attn -----------------
-        ## [B, N, C] -> [B, N, H, C_h] -> [B, H, N, C_h]
-        query = query.view(bs, Nq, self.num_heads, self.d_model // self.num_heads)
-        query = query.permute(0, 2, 1, 3).contiguous()
-        key   = key.view(bs, Nk, self.num_heads, self.d_model // self.num_heads)
-        key   = key.permute(0, 2, 1, 3).contiguous()
-        value = value.view(bs, Nk, self.num_heads, self.d_model // self.num_heads)
-        value = value.permute(0, 2, 1, 3).contiguous()
-        # Attention
-        ## [B, H, Nq, C_h] X [B, H, C_h, Nk] = [B, H, Nq, Nk]
-        sim_matrix = torch.matmul(query, key.transpose(-1, -2)) * self.scale
-        sim_matrix = torch.softmax(sim_matrix, dim=-1)
-
-        # ----------------- Output -----------------
-        out = torch.matmul(sim_matrix, value)  # [B, H, Nq, C_h]
-        out = out.permute(0, 2, 1, 3).contiguous().view(bs, Nq, -1)
-        out = self.out_proj(out)
-
-        return out
-        
-
-# ---------------------------- Modified YOLOv7's Modules ----------------------------
-class ELANBlock(nn.Module):
-    def __init__(self, in_dim, out_dim, expand_ratio=0.5, depth=1.0, act_type='silu', norm_type='BN', depthwise=False):
-        super(ELANBlock, self).__init__()
-        if isinstance(expand_ratio, float):
-            inter_dim = int(in_dim * expand_ratio)
-            inter_dim2 = inter_dim
-        elif isinstance(expand_ratio, list):
-            assert len(expand_ratio) == 2
-            e1, e2 = expand_ratio
-            inter_dim = int(in_dim * e1)
-            inter_dim2 = int(inter_dim * e2)
-        # branch-1
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        # branch-2
-        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        # branch-3
-        cv3 = []
-        for idx in range(round(3*depth)):
-            c_in = inter_dim if idx == 0 else inter_dim2
-            cv3.append(Conv(c_in, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-        self.cv3 = nn.Sequential(*cv3)
-        # branch-4
-        self.cv4 = nn.Sequential(*[
-            Conv(inter_dim2, inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(round(3*depth))
-        ])
-        # output
-        self.out = Conv(inter_dim*2 + inter_dim2*2, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-
-    def forward(self, x):
-        """
-        Input:
-            x: [B, C_in, H, W]
-        Output:
-            out: [B, C_out, H, W]
-        """
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.cv3(x2)
-        x4 = self.cv4(x3)
-
-        # [B, C, H, W] -> [B, 2C, H, W]
-        out = self.out(torch.cat([x1, x2, x3, x4], dim=1))
-
-        return out
-
-class ELANBlockFPN(nn.Module):
-    def __init__(self, in_dim, out_dim, expand_ratio :float=0.5, branch_depth :int=1, shortcut=False, act_type='silu', norm_type='BN', depthwise=False):
-        super().__init__()
-        # ----------- Basic Parameters -----------
-        self.in_dim = in_dim
-        self.out_dim = out_dim
-        self.inter_dim1 = round(out_dim * expand_ratio)
-        self.inter_dim2 = round(self.inter_dim1 * expand_ratio)
-        self.expand_ratio = expand_ratio
-        self.branch_depth = branch_depth
-        self.shortcut = shortcut
-        # ----------- Network Parameters -----------
-        ## branch-1
-        self.cv1 = Conv(in_dim, self.inter_dim1, k=1, act_type=act_type, norm_type=norm_type)
-        ## branch-2
-        self.cv2 = Conv(in_dim, self.inter_dim1, k=1, act_type=act_type, norm_type=norm_type)
-        ## branch-3
-        self.cv3 = []
-        for i in range(branch_depth):
-            if i == 0:
-                self.cv3.append(Conv(self.inter_dim1, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-            else:
-                self.cv3.append(Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-        self.cv3 = nn.Sequential(*self.cv3)
-        ## branch-4
-        self.cv4 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(branch_depth)
-        ])
-        ## branch-5
-        self.cv5 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(branch_depth)
-        ])
-        ## branch-6
-        self.cv6 = nn.Sequential(*[
-            Conv(self.inter_dim2, self.inter_dim2, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-            for _ in range(branch_depth)
-        ])
-        ## output proj
-        self.out = Conv(self.inter_dim1*2 + self.inter_dim2*4, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        x3 = self.cv3(x2)
-        x4 = self.cv4(x3)
-        x5 = self.cv5(x4)
-        x6 = self.cv6(x5)
-
-        # [B, C, H, W] -> [B, 2C, H, W]
-        out = self.out(torch.cat([x1, x2, x3, x4, x5, x6], dim=1))
-
-        return out
-    
-class DSBlock(nn.Module):
-    def __init__(self, in_dim, out_dim, act_type='silu', norm_type='BN', depthwise=False):
-        super().__init__()
-        inter_dim = out_dim // 2
-        self.mp = nn.MaxPool2d((2, 2), 2)
-        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        self.cv2 = nn.Sequential(
-            Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type),
-            Conv(inter_dim, inter_dim, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-    def forward(self, x):
-        x1 = self.cv1(self.mp(x))
-        x2 = self.cv2(x)
-        out = torch.cat([x1, x2], dim=1)
-
-        return out
-
-
-# ---------------------------- Transformer Modules ----------------------------
-class TREncoderLayer(nn.Module):
-    def __init__(self,
-                 d_model,
-                 num_heads,
-                 mlp_ratio=4.0,
-                 dropout=0.1,
-                 act_type="relu",
-                 ):
-        super().__init__()
-        # Multi-head Self-Attn
-        self.self_attn = MultiHeadAttention(d_model, num_heads, dropout)
-        self.dropout = nn.Dropout(dropout)
-        self.norm = nn.LayerNorm(d_model)
-
-        # Feedforward Network
-        self.ffn = FFN(d_model, mlp_ratio, dropout, act_type)
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward(self, src, pos):
-        """
-        Input:
-            src: [torch.Tensor] -> [B, N, C]
-            pos: [torch.Tensor] -> [B, N, C]
-        Output:
-            src: [torch.Tensor] -> [B, N, C]
-        """
-        q = k = self.with_pos_embed(src, pos)
-
-        # self-attn
-        src2 = self.self_attn(q, k, value=src)
-
-        # residual connection + layer norm
-        src = src + self.dropout(src2)
-        src = self.norm(src)
-
-        # ffn
-        src = self.ffn(src)
-        
-        return src
-
-class TRDecoderLayer(nn.Module):
-    def __init__(self,
-                 d_model,
-                 num_heads,
-                 mlp_ratio=4.0,
-                 dropout=0.1,
-                 act_type="relu"):
-        super().__init__()
-        self.d_model = d_model
-        # self attention
-        self.self_attn = MultiHeadAttention(d_model, num_heads, dropout)
-        self.dropout1 = nn.Dropout(dropout)
-        self.norm1 = nn.LayerNorm(d_model)
-        # cross attention
-        self.cross_attn = MultiHeadAttention(d_model, num_heads, dropout)
-        self.dropout2 = nn.Dropout(dropout)
-        self.norm2 = nn.LayerNorm(d_model)
-        # FFN
-        self.ffn = FFN(d_model, mlp_ratio, dropout, act_type)
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward(self, tgt, query_pos, memory, memory_pos):
-        # self attention
-        q1 = k1 = self.with_pos_embed(tgt, query_pos)
-        v1 = tgt
-        tgt2 = self.self_attn(q1, k1, v1)
-        tgt = tgt + self.dropout1(tgt2)
-        tgt = self.norm1(tgt)
-
-        # cross attention
-        q2 = self.with_pos_embed(tgt, query_pos)
-        k2 = self.with_pos_embed(memory, memory_pos)
-        v2 = memory
-        tgt2 = self.cross_attn(q2, k2, v2)
-        tgt = tgt + self.dropout2(tgt2)
-        tgt = self.norm2(tgt)
-
-        # ffn
-        tgt = self.ffn(tgt)
-
-        return tgt
-    
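As a shape reference for the MultiHeadAttention module above: the head split is a view/permute round-trip around a scaled dot-product. A minimal, self-contained walk-through with plain torch tensors; the batch, token, and head sizes are arbitrary illustration values, not settings from this repository.

import torch

B, Nq, Nk, d_model, num_heads = 2, 100, 400, 256, 8
head_dim = d_model // num_heads        # 32
scale = head_dim ** -0.5

# [B, N, C] -> [B, H, N, C_h]
q = torch.randn(B, Nq, d_model).view(B, Nq, num_heads, head_dim).permute(0, 2, 1, 3)
k = torch.randn(B, Nk, d_model).view(B, Nk, num_heads, head_dim).permute(0, 2, 1, 3)
v = torch.randn(B, Nk, d_model).view(B, Nk, num_heads, head_dim).permute(0, 2, 1, 3)

attn = torch.softmax(q @ k.transpose(-1, -2) * scale, dim=-1)   # [B, H, Nq, Nk]
out = (attn @ v).permute(0, 2, 1, 3).reshape(B, Nq, d_model)    # back to [B, Nq, C]
print(out.shape)  # torch.Size([2, 100, 256])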

+ 0 - 184
models/detectors/rtrdet/rtrdet_transformer.py

@@ -1,184 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-
-from .rtrdet_basic import get_clones, TREncoderLayer, TRDecoderLayer, MLP
-
-
-class RTRDetTransformer(nn.Module):
-    def __init__(self, cfg, num_classes, return_intermediate):
-        super().__init__()
-        # -------------------- Basic Parameters ---------------------
-        self.d_model = round(cfg['d_model']*cfg['width'])
-        self.num_classes = num_classes
-        self.num_encoder = cfg['num_encoder']
-        self.num_decoder = cfg['num_decoder']
-        self.num_queries = cfg['decoder_num_queries']
-        self.num_pattern = cfg['decoder_num_pattern']
-        self.stop_layer_id = cfg['num_decoder'] if cfg['stop_layer_id'] == -1 else cfg['stop_layer_id']
-        self.return_intermediate = return_intermediate
-        self.scale = 2 * math.pi
-
-        # -------------------- Network Parameters ---------------------
-        ## Transformer Encoder
-        encoder_layer = TREncoderLayer(
-            self.d_model, cfg['encoder_num_head'], cfg['encoder_mlp_ratio'], cfg['encoder_dropout'], cfg['encoder_act'])
-        self.encoder_layers = get_clones(encoder_layer, cfg['num_encoder'])
-
-        ## Transformer Decoder
-        decoder_layer = TRDecoderLayer(
-            self.d_model, cfg['decoder_num_head'], cfg['decoder_mlp_ratio'], cfg['decoder_dropout'], cfg['decoder_act'])
-        self.decoder_layers = get_clones(decoder_layer, cfg['num_decoder'])
-
-        ## Pattern embed
-        self.pattern = nn.Embedding(cfg['decoder_num_pattern'], self.d_model)
-
-        ## Position embed
-        self.position = nn.Embedding(cfg['decoder_num_queries'], 2)
-
-        ## Adaptive PosEmbed
-        self.adapt_pos2d = nn.Sequential(
-            nn.Linear(self.d_model, self.d_model),
-            nn.ReLU(),
-            nn.Linear(self.d_model, self.d_model),
-        )
-
-        ## Output head
-        self.class_embed = nn.Linear(self.d_model, self.num_classes)
-        self.bbox_embed  = MLP(self.d_model, self.d_model, 4, 3)
-
-        self._reset_parameters()
-
-    def _reset_parameters(self):
-        prior_prob = 0.01
-        bias_value = -math.log((1 - prior_prob) / prior_prob)
-        self.class_embed.bias.data = torch.ones(self.num_classes) * bias_value
-
-        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
-        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
-        nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
-        nn.init.uniform_(self.position.weight.data, 0, 1)
-
-        self.class_embed = nn.ModuleList([self.class_embed for _ in range(self.num_decoder)])
-        self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(self.num_decoder)])
-
-    def generate_posembed(self, x, temperature=10000):
-        num_pos_feats, hs, ws = x.shape[1]//2, x.shape[2], x.shape[3]
-        # generate xy coord mat
-        y_embed, x_embed = torch.meshgrid(
-            [torch.arange(1, hs+1, dtype=torch.float32),
-             torch.arange(1, ws+1, dtype=torch.float32)])
-        y_embed = y_embed / (hs + 1e-6) * self.scale
-        x_embed = x_embed / (ws + 1e-6) * self.scale
-    
-        # [H, W] -> [1, H, W]
-        y_embed = y_embed[None, :, :].to(x.device)
-        x_embed = x_embed[None, :, :].to(x.device)
-
-        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=x.device)
-        dim_t_ = torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats
-        dim_t = temperature ** (2 * dim_t_)
-
-        pos_x = torch.div(x_embed[..., None], dim_t)
-        pos_y = torch.div(y_embed[..., None], dim_t)
-        pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)
-        pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)
-
-        # [B, H, W, C] -> [B, C, H, W]
-        pos_embed = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
-        
-        return pos_embed
-        
-    def pos2posemb2d(self, pos, temperature=10000):
-        scale = 2 * math.pi
-        num_pos_feats = self.d_model // 2
-        pos = pos * scale
-        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
-        dim_t_ = torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats
-        dim_t = temperature ** (2 * dim_t_)
-        pos_x = pos[..., 0, None] / dim_t
-        pos_y = pos[..., 1, None] / dim_t
-        pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
-        pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
-        posemb = torch.cat((pos_y, pos_x), dim=-1)
-        
-        return posemb
-
-    def inverse_sigmoid(self, x):
-        x = x.clamp(min=0, max=1)
-        return torch.log(x.clamp(min=1e-5)/(1 - x).clamp(min=1e-5))
-
-    def forward(self, src1=None, src2=None):
-        """
-        Input:
-            src1: C4-level feature -> [B, C4, H4, W4]
-            src2: C5-level feature -> [B, C5, H5, W5]
-        Output:
-            output_classes: (Tensor) -> [L, B, Nq, num_classes]
-            output_coords : (Tensor) -> [L, B, Nq, 4]
-        """
-        bs, c, h, w = src2.size()
-
-        # ------------------------ Transformer Encoder ------------------------
-        ## Generate pos_embed for src2
-        pos2d_embed_2 = self.generate_posembed(src2)
-        
-        ## Reshape: [B, C, H, W] -> [B, N, C], N = HW
-        src2 = src2.flatten(2).permute(0, 2, 1).contiguous()
-        pos2d_embed_2 = self.adapt_pos2d(pos2d_embed_2.flatten(2).permute(0, 2, 1).contiguous())
-        
-        ## Encoder layer
-        for layer_id, encoder_layer in enumerate(self.encoder_layers):
-            src2 = encoder_layer(src2, pos2d_embed_2)
-        
-        ## Feature fusion
-        src2 = src2.permute(0, 2, 1).reshape(bs, c, h, w)
-        if src1 is not None:
-            src1 = src1 + nn.functional.interpolate(src2, scale_factor=2.0)
-        else:
-            src1 = src2
-        
-        # ------------------------ Transformer Decoder ------------------------
-        ## Generate pos_embed for src1
-        pos2d_embed_1 = self.generate_posembed(src1)
-
-        ## Reshape memory: [B, C, H, W] -> [B, N, C], N = HW
-        src1 = src1.flatten(2).permute(0, 2, 1).contiguous()
-        pos2d_embed_1 = self.adapt_pos2d(pos2d_embed_1.flatten(2).permute(0, 2, 1).contiguous())
-
-        ## Reshape tgt: [Np, C] -> [1, Np, 1, C] -> [B, Np, Nq, C] -> [B, Np*Nq, C], Np = num_pattern, Nq = num_queries
-        tgt = self.pattern.weight.reshape(1, self.num_pattern, 1, c).repeat(bs, 1, self.num_queries, 1)
-        tgt = tgt.reshape(bs, self.num_pattern * self.num_queries, c)
-        
-        ## Prepare reference points
-        reference_points = self.position.weight.unsqueeze(0).repeat(bs, self.num_pattern, 1)
-
-        ## Decoder layer
-        output_classes = []
-        output_coords = []
-        for layer_id, decoder_layer in enumerate(self.decoder_layers):
-            ## query embed
-            query_pos = self.adapt_pos2d(self.pos2posemb2d(reference_points))
-            tgt = decoder_layer(tgt, query_pos, src1, pos2d_embed_1)
-            reference = self.inverse_sigmoid(reference_points)
-            ## class
-            outputs_class = self.class_embed[layer_id](tgt)
-            ## bbox
-            tmp = self.bbox_embed[layer_id](tgt)
-            tmp[..., :2] += reference
-            outputs_coord = tmp.sigmoid()
-
-            output_classes.append(outputs_class)
-            output_coords.append(outputs_coord)
-
-            if layer_id == self.stop_layer_id:
-                break
-
-        return torch.stack(output_classes), torch.stack(output_coords)
-
-    
-# build transformer
-def build_transformer(cfg, num_classes, return_intermediate=False):
-    if cfg['transformer'] == "RTRDetTransformer":
-        transformer = RTRDetTransformer(cfg, num_classes, return_intermediate)
-
-    return transformer
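The RTRDetTransformer constructor above reads a fixed set of keys from cfg. The skeleton below only documents that layout; every value is a placeholder chosen for illustration (the removed rtrdet_config.py held the real settings), and the commented call shows how build_transformer would consume it.

# Illustrative cfg skeleton; keys follow RTRDetTransformer.__init__ / build_transformer,
# values are placeholders, not the original configuration.
example_cfg = {
    'transformer': 'RTRDetTransformer',
    'd_model': 256, 'width': 1.0,
    'num_encoder': 1, 'encoder_num_head': 8, 'encoder_mlp_ratio': 4.0,
    'encoder_dropout': 0.1, 'encoder_act': 'relu',
    'num_decoder': 3, 'decoder_num_head': 8, 'decoder_mlp_ratio': 4.0,
    'decoder_dropout': 0.1, 'decoder_act': 'relu',
    'decoder_num_queries': 300, 'decoder_num_pattern': 3,
    'stop_layer_id': -1,
}
# transformer = build_transformer(example_cfg, num_classes=80)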