yjh0410, 1 year ago
Commit 9b1a304136

+ 0 - 3
yolo/config/__init__.py

@@ -7,7 +7,6 @@ from .yolov5_af_config import build_yolov5af_config
 from .yolov7_af_config import build_yolov7af_config
 from .yolov8_config    import build_yolov8_config
 from .gelan_config     import build_gelan_config
-from .rtcdet_config    import build_rtcdet_config
 from .rtdetr_config    import build_rtdetr_config
 
 def build_config(args):
@@ -30,8 +29,6 @@ def build_config(args):
         cfg = build_yolov8_config(args)
     elif 'gelan' in args.model:
         cfg = build_gelan_config(args)
-    elif 'rtcdet' in args.model:
-        cfg = build_rtcdet_config(args)
     # ----------- RT-DETR -----------
     elif 'rtdetr' in args.model:
         cfg = build_rtdetr_config(args)

+ 0 - 222
yolo/config/rtcdet_config.py

@@ -1,222 +0,0 @@
-# yolo Config
-
-
-def build_rtcdet_config(args):
-    if   args.model == 'rtcdet_n':
-        return RTCDetNConfig()
-    elif args.model == 'rtcdet_t':
-        return RTCDetTConfig()
-    elif args.model == 'rtcdet_s':
-        return RTCDetSConfig()
-    elif args.model == 'rtcdet_m':
-        return RTCDetMConfig()
-    elif args.model == 'rtcdet_l':
-        return RTCDetLConfig()
-    elif args.model == 'rtcdet_x':
-        return RTCDetXConfig()
-    else:
-        raise NotImplementedError("No config for model: {}".format(args.model))
-    
-# RTCDet-Base config
-class RTCDetBaseConfig(object):
-    def __init__(self) -> None:
-        # ---------------- Model config ----------------
-        self.channel_width = 1.0
-        self.last_stage_ratio = 1.0
-        self.num_blocks = [3, 6, 6, 3]
-        self.num_levels = 3
-        self.out_stride = [8, 16, 32]
-        self.max_stride = 32
-        self.reg_max    = 16
-        self.scale      = "b"
-        ## Backbone
-        self.bk_act   = 'silu'
-        self.bk_norm  = 'BN'
-        self.bk_depthwise = False
-        self.use_pretrained = False
-        ## Neck
-        self.neck_act       = 'silu'
-        self.neck_norm      = 'BN'
-        self.neck_depthwise = False
-        self.neck_expand_ratio = 0.5
-        self.spp_pooling_size  = 5
-        ## FPN
-        self.fpn_num_blocks = 3
-        self.fpn_act  = 'silu'
-        self.fpn_norm = 'BN'
-        self.fpn_depthwise = False
-        ## Head
-        self.head_act  = 'silu'
-        self.head_norm = 'BN'
-        self.head_depthwise = False
-        self.num_cls_head   = 2
-        self.num_reg_head   = 2
-
-        # ---------------- Post-process config ----------------
-        ## Post process
-        self.val_topk = 1000
-        self.val_conf_thresh = 0.001
-        self.val_nms_thresh  = 0.7
-        self.test_topk = 100
-        self.test_conf_thresh = 0.3
-        self.test_nms_thresh  = 0.5
-
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        self.ota_soft_center_radius = 3.0
-        self.ota_topk_candidates = 13
-        ## Loss weight
-        self.loss_cls = 1.0
-        self.loss_box = 2.0
-        self.loss_dfl = 0.5
-
-        # ---------------- ModelEMA config ----------------
-        self.use_ema = True
-        self.ema_decay = 0.9998
-        self.ema_tau   = 2000
-
-        # ---------------- Optimizer config ----------------
-        self.trainer      = 'yolo'
-        self.optimizer    = 'adamw'
-        self.per_image_lr = 0.001 / 64
-        self.base_lr      = None      # base_lr = per_image_lr * batch_size
-        self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
-        self.momentum     = 0.9
-        self.weight_decay = 0.05
-        self.clip_max_norm   = 35.0
-        self.warmup_bias_lr  = 0.1
-        self.warmup_momentum = 0.8
-
-        # ---------------- Lr Scheduler config ----------------
-        self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
-        self.max_epoch    = 300
-        self.eval_epoch   = 10
-        self.no_aug_epoch = 20
-
-        # ---------------- Data process config ----------------
-        self.aug_type = 'yolo'
-        self.box_format = 'xyxy'
-        self.normalize_coords = False
-        self.mosaic_prob = 0.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.0           # approximated by YOLOX-style MixUp
-        self.multi_scale = [0.5, 1.25]   # multi scale: [img_size * 0.5, img_size * 1.25]
-        ## Pixel mean & std
-        self.pixel_mean = [0., 0., 0.]
-        self.pixel_std  = [255., 255., 255.]
-        ## Transforms
-        self.train_img_size = 640
-        self.test_img_size  = 640
-        self.use_ablu = True
-        self.affine_params = {
-            'degrees': 0.0,
-            'translate': 0.2,
-            'scale': [0.1, 2.0],
-            'shear': 0.0,
-            'perspective': 0.0,
-            'hsv_h': 0.015,
-            'hsv_s': 0.7,
-            'hsv_v': 0.4,
-        }
-
-    def print_config(self):
-        config_dict = {key: value for key, value in self.__dict__.items() if not key.startswith('__')}
-        for k, v in config_dict.items():
-            print("{} : {}".format(k, v))
-
-# RTCDet-N
-class RTCDetNConfig(RTCDetBaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.channel_width = 0.25
-        self.last_stage_ratio = 2.0
-        self.num_blocks = [1, 2, 2, 1]
-        self.scale = "n"
-        self.fpn_num_blocks = 1
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.5
-
-# RTCDet-T
-class RTCDetTConfig(RTCDetBaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.channel_width = 0.375
-        self.last_stage_ratio = 2.0
-        self.num_blocks = [1, 2, 2, 1]
-        self.scale = "t"
-        self.fpn_num_blocks = 1
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.5
-
-# RTCDet-S
-class RTCDetSConfig(RTCDetBaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.channel_width = 0.50
-        self.num_blocks = [1, 2, 2, 1]
-        self.last_stage_ratio = 2.0
-        self.scale = "s"
-        self.fpn_num_blocks = 1
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.5
-
-# RTCDet-M
-class RTCDetMConfig(RTCDetBaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.channel_width = 0.75
-        self.last_stage_ratio = 1.5
-        self.num_blocks = [2, 4, 4, 2]
-        self.scale = "m"
-        self.fpn_num_blocks = 2
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.1
-        self.copy_paste  = 0.5
-
-# RTCDet-L
-class RTCDetLConfig(RTCDetBaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.channel_width = 1.0
-        self.last_stage_ratio = 1.0
-        self.num_blocks = [3, 6, 6, 3]
-        self.scale = "l"
-        self.fpn_num_blocks = 3
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.1
-        self.copy_paste  = 0.5
-
-# RTCDet-X
-class RTCDetXConfig(RTCDetBaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.channel_width = 1.25
-        self.last_stage_ratio = 1.0
-        self.num_blocks = [3, 6, 6, 3]
-        self.scale = "x"
-        self.fpn_num_blocks = 3
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.1
-        self.copy_paste  = 0.5

+ 0 - 4
yolo/models/__init__.py

@@ -10,7 +10,6 @@ from .yolov5_af.build import build_yolov5af
 from .yolov7_af.build import build_yolov7af
 from .yolov8.build    import build_yolov8
 from .gelan.build     import build_gelan
-from .rtcdet.build    import build_rtcdet
 from .rtdetr.build    import build_rtdetr
 
 # build object detector
@@ -40,9 +39,6 @@ def build_model(args, cfg, is_val=False):
     ## GElan
     elif 'gelan' in args.model:
         model, criterion = build_gelan(cfg, is_val)
-    ## RTCDet
-    elif 'rtcdet' in args.model:
-        model, criterion = build_rtcdet(cfg, is_val)
     ## RT-DETR
     elif 'rtdetr' in args.model:
         model, criterion = build_rtdetr(cfg, is_val)

+ 0 - 56
yolo/models/rtcdet/README.md

@@ -1,56 +0,0 @@
-# RTCDet: My Empirical Study of Real-Time Convolutional Object Detectors.
-
-- VOC
-
-|     Model   | Batch | Scale | AP<sup>val<br>0.5 | Weight |  Logs  |
-|-------------|-------|-------|-------------------|--------|--------|
-| RTCDet-S    | 1xb16 |  640  |               |  |  |
-
-- COCO
-
-|    Model    | Batch | Scale | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |  Logs  |
-|-------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|--------|
-| RTCDet-S    | 1xb16 |  640  |                    |               |   26.9            |   8.9             |  |  |
-
-
-
-## Train RTCDet
-### Single GPU
-For example, to train RTCDet-S on COCO:
-```Shell
-python train.py --cuda -d coco --root path/to/coco -m rtcdet_s -bs 16 --fp16 
-```
-
-### Multi GPU
-For example, to train RTCDet-S on COCO:
-```Shell
-python -m torch.distributed.run --nproc_per_node=8 train.py --cuda --distributed -d coco --root path/to/coco -m rtcdet_s -bs 256 --fp16 
-```
-
-## Test RTCDet
-For example, to test RTCDet-S on COCO-val:
-```Shell
-python test.py --cuda -d coco --root path/to/coco -m rtcdet_s --weight path/to/RTCDet.pth --show 
-```
-
-## Evaluate RTCDet
-For example, to evaluate RTCDet-S on COCO-val:
-```Shell
-python eval.py --cuda -d coco --root path/to/coco -m rtcdet_s --weight path/to/RTCDet.pth 
-```
-
-## Demo
-### Detect with Image
-```Shell
-python demo.py --mode image --path_to_img path/to/image_dirs/ --cuda -m rtcdet_s --weight path/to/weight --show
-```
-
-### Detect with Video
-```Shell
-python demo.py --mode video --path_to_vid path/to/video --cuda -m rtcdet_s --weight path/to/weight --show --gif
-```
-
-### Detect with Camera
-```Shell
-python demo.py --mode camera --cuda -m rtcdet_s --weight path/to/weight --show --gif
-```

+ 0 - 24
yolo/models/rtcdet/build.py

@@ -1,24 +0,0 @@
-import torch.nn as nn
-
-from .loss import SetCriterion
-from .rtcdet import RTCDet
-
-
-# build object detector
-def build_rtcdet(cfg, is_val=False):
-    # -------------- Build YOLO --------------
-    model = RTCDet(cfg, is_val)
-
-    # -------------- Initialize YOLO --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if is_val:
-        # build criterion for training
-        criterion = SetCriterion(cfg)
-        
-    return model, criterion

+ 0 - 177
yolo/models/rtcdet/loss.py

@@ -1,177 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from utils.box_ops import get_ious, bbox2dist
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-from .matcher import AlignedSimOTA
-
-
-class SetCriterion(object):
-    def __init__(self, cfg):
-        self.cfg = cfg
-        self.reg_max = cfg.reg_max
-        self.num_classes = cfg.num_classes
-        # --------------- Loss config ---------------
-        self.loss_cls_weight = cfg.loss_cls
-        self.loss_box_weight = cfg.loss_box
-        self.loss_dfl_weight = cfg.loss_dfl
-        # --------------- Matcher config ---------------
-        self.matcher = AlignedSimOTA(soft_center_radius = cfg.ota_soft_center_radius,
-                                     topk_candidates    = cfg.ota_topk_candidates,
-                                     num_classes        = cfg.num_classes,
-                                     )
-
-    def loss_classes(self, pred_cls, target, beta=2.0):
-        # Quality Focal Loss (QFL)
-        """
-            pred_cls: (torch.Tensor): [N, C]
-            target:   (tuple([torch.Tensor], [torch.Tensor])): label -> (N,), score -> (N,)
-        """
-        label, score = target
-        pred_sigmoid = pred_cls.sigmoid()
-        scale_factor = pred_sigmoid
-        zerolabel = scale_factor.new_zeros(pred_cls.shape)
-
-        ce_loss = F.binary_cross_entropy_with_logits(
-            pred_cls, zerolabel, reduction='none') * scale_factor.pow(beta)
-        
-        bg_class_ind = pred_cls.shape[-1]
-        pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
-        if pos.shape[0] > 0:
-            pos_label = label[pos].long()
-
-            scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
-
-            ce_loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
-                pred_cls[pos, pos_label], score[pos],
-                reduction='none') * scale_factor.abs().pow(beta)
-
-        return ce_loss
-    
-    def loss_bboxes(self, pred_box, gt_box):
-        ious = get_ious(pred_box, gt_box, box_mode="xyxy", iou_type='giou')
-        loss_box = 1.0 - ious
-
-        return loss_box
-
-    def loss_dfl(self, pred_reg, gt_box, anchor, stride):
-        # rescale coords by stride
-        gt_box_s = gt_box / stride
-        anchor_s = anchor / stride
-
-        # compute deltas
-        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.reg_max - 1)
-
-        gt_left = gt_ltrb_s.to(torch.long)
-        gt_right = gt_left + 1
-
-        weight_left = gt_right.to(torch.float) - gt_ltrb_s
-        weight_right = 1 - weight_left
-
-        # loss left
-        loss_left = F.cross_entropy(
-            pred_reg.view(-1, self.reg_max),
-            gt_left.view(-1),
-            reduction='none').view(gt_left.shape) * weight_left
-        # loss right
-        loss_right = F.cross_entropy(
-            pred_reg.view(-1, self.reg_max),
-            gt_right.view(-1),
-            reduction='none').view(gt_left.shape) * weight_right
-
-        loss_dfl = (loss_left + loss_right).mean(-1)
-        
-        return loss_dfl
-
-    def __call__(self, outputs, targets):        
-        """
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['pred_reg']: List(Tensor) [B, M, 4*reg_max]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs          = outputs['pred_cls'][0].shape[0]
-        device      = outputs['pred_cls'][0].device
-        fpn_strides = outputs['strides']
-        anchors     = outputs['anchors']
-        # preds: [B, M, C]
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
-        
-        # --------------- label assignment ---------------
-        cls_targets = []
-        box_targets = []
-        assign_metrics = []
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)  # [N,]
-            tgt_bboxes = targets[batch_idx]["boxes"].to(device)   # [N, 4]
-            assigned_result = self.matcher(fpn_strides=fpn_strides,
-                                           anchors=anchors,
-                                           pred_cls=cls_preds[batch_idx].detach(),
-                                           pred_box=box_preds[batch_idx].detach(),
-                                           gt_labels=tgt_labels,
-                                           gt_bboxes=tgt_bboxes
-                                           )
-            cls_targets.append(assigned_result['assigned_labels'])
-            box_targets.append(assigned_result['assigned_bboxes'])
-            assign_metrics.append(assigned_result['assign_metrics'])
-
-        # List[B, M, C] -> Tensor[BM, C]
-        cls_targets = torch.cat(cls_targets, dim=0)
-        box_targets = torch.cat(box_targets, dim=0)
-        assign_metrics = torch.cat(assign_metrics, dim=0)
-
-        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
-        bg_class_ind = self.num_classes
-        pos_inds = ((cls_targets >= 0) & (cls_targets < bg_class_ind)).nonzero().squeeze(1)
-        num_fgs = assign_metrics.sum()
-
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0).item()
-
-        # ------------------ Classification loss ------------------
-        cls_preds = cls_preds.view(-1, self.num_classes)
-        loss_cls = self.loss_classes(cls_preds, (cls_targets, assign_metrics))
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ Regression loss ------------------
-        box_preds_pos = box_preds.view(-1, 4)[pos_inds]
-        box_targets_pos = box_targets[pos_inds]
-        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos)
-        loss_box = loss_box.sum() / num_fgs
-
-        # ------------------ Distribution focal loss  ------------------
-        ## process anchors
-        anchors = torch.cat(outputs['anchors'], dim=0)
-        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
-        ## process stride tensors
-        strides = torch.cat(outputs['stride_tensor'], dim=0)
-        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
-        ## fg preds
-        reg_preds_pos = reg_preds.view(-1, 4*self.reg_max)[pos_inds]
-        anchors_pos = anchors[pos_inds]
-        strides_pos = strides[pos_inds]
-        ## compute dfl
-        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets_pos, anchors_pos, strides_pos)
-        loss_dfl = loss_dfl.sum() / num_fgs
-
-        # total loss
-        losses = self.loss_cls_weight * loss_cls + \
-                 self.loss_box_weight * loss_box + \
-                 self.loss_dfl_weight * loss_dfl
-        loss_dict = dict(
-                loss_cls = loss_cls,
-                loss_box = loss_box,
-                loss_dfl = loss_dfl,
-                losses = losses
-        )
-
-        return loss_dict
-    

+ 0 - 160
yolo/models/rtcdet/matcher.py

@@ -1,160 +0,0 @@
-# ------------------------------------------------------------------------------------------
-# This code is adapted from https://github.com/open-mmlab/mmyolo/models/task_modules/assigners/batch_dsl_assigner.py
-# ------------------------------------------------------------------------------------------
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from utils.box_ops import box_iou
-
-
-# -------------------------- Aligned SimOTA assigner --------------------------
-class AlignedSimOTA(object):
-    def __init__(self, num_classes, soft_center_radius=3.0, topk_candidates=13):
-        self.num_classes = num_classes
-        self.soft_center_radius = soft_center_radius
-        self.topk_candidates = topk_candidates
-
-    @torch.no_grad()
-    def __call__(self, 
-                 fpn_strides, 
-                 anchors, 
-                 pred_cls, 
-                 pred_box, 
-                 gt_labels,
-                 gt_bboxes):
-        # [M,]
-        strides = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
-                                for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
-        # List[F, M, 2] -> [M, 2]
-        num_gt = len(gt_labels)
-        anchors = torch.cat(anchors, dim=0)
-
-        # check gt
-        if num_gt == 0 or gt_bboxes.max().item() == 0.:
-            return {
-                'assigned_labels': gt_labels.new_full(pred_cls[..., 0].shape, self.num_classes).long(),
-                'assigned_bboxes': gt_bboxes.new_full(pred_box.shape, 0).float(),
-                'assign_metrics':  gt_bboxes.new_full(pred_cls[..., 0].shape, 0).float(),
-            }
-        
-        # get inside points: [N, M]
-        is_in_gt = self.find_inside_points(gt_bboxes, anchors)
-        valid_mask = is_in_gt.sum(dim=0) > 0  # [M,]
-
-        # ----------------------------------- soft center prior -----------------------------------
-        gt_center = (gt_bboxes[..., :2] + gt_bboxes[..., 2:]) / 2.0
-        distance = (anchors.unsqueeze(0) - gt_center.unsqueeze(1)
-                    ).pow(2).sum(-1).sqrt() / strides.unsqueeze(0)  # [N, M]
-        distance = distance * valid_mask.unsqueeze(0)
-        soft_center_prior = torch.pow(10, distance - self.soft_center_radius)
-
-        # ----------------------------------- regression cost -----------------------------------
-        pair_wise_ious, _ = box_iou(gt_bboxes, pred_box)  # [N, M]
-        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8) * 3.0
-
-        # ----------------------------------- classification cost -----------------------------------
-        ## select the predicted scores corresponded to the gt_labels
-        pairwise_pred_scores = pred_cls.permute(1, 0)  # [M, C] -> [C, M]
-        pairwise_pred_scores = pairwise_pred_scores[gt_labels.long(), :].float()   # [N, M]
-        ## scale factor
-        scale_factor = (pair_wise_ious - pairwise_pred_scores.sigmoid()).abs().pow(2.0)
-        ## cls cost
-        pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
-            pairwise_pred_scores, pair_wise_ious,
-            reduction="none") * scale_factor # [N, M]
-            
-        del pairwise_pred_scores
-
-        ## foreground cost matrix
-        cost_matrix = pair_wise_cls_loss + pair_wise_ious_loss + soft_center_prior
-        max_pad_value = torch.ones_like(cost_matrix) * 1e9
-        cost_matrix = torch.where(valid_mask[None].repeat(num_gt, 1),   # [N, M]
-                                  cost_matrix, max_pad_value)
-
-        # ----------------------------------- dynamic label assignment -----------------------------------
-        matched_pred_ious, matched_gt_inds, fg_mask_inboxes = self.dynamic_k_matching(
-            cost_matrix, pair_wise_ious, num_gt)
-        del pair_wise_cls_loss, cost_matrix, pair_wise_ious, pair_wise_ious_loss
-
-        # -----------------------------------process assigned labels -----------------------------------
-        assigned_labels = gt_labels.new_full(pred_cls[..., 0].shape,
-                                             self.num_classes)  # [M,]
-        assigned_labels[fg_mask_inboxes] = gt_labels[matched_gt_inds].squeeze(-1)
-        assigned_labels = assigned_labels.long()  # [M,]
-
-        assigned_bboxes = gt_bboxes.new_full(pred_box.shape, 0)        # [M, 4]
-        assigned_bboxes[fg_mask_inboxes] = gt_bboxes[matched_gt_inds]  # [M, 4]
-
-        assign_metrics = gt_bboxes.new_full(pred_cls[..., 0].shape, 0) # [M,]
-        assign_metrics[fg_mask_inboxes] = matched_pred_ious            # [M,]
-
-        assigned_dict = dict(
-            assigned_labels=assigned_labels,
-            assigned_bboxes=assigned_bboxes,
-            assign_metrics=assign_metrics
-            )
-        
-        return assigned_dict
-
-    def find_inside_points(self, gt_bboxes, anchors):
-        """
-            gt_bboxes: Tensor -> [N, 4]
-            anchors:   Tensor -> [M, 2]
-        """
-        num_anchors = anchors.shape[0]
-        num_gt = gt_bboxes.shape[0]
-
-        anchors_expand = anchors.unsqueeze(0).repeat(num_gt, 1, 1)           # [N, M, 2]
-        gt_bboxes_expand = gt_bboxes.unsqueeze(1).repeat(1, num_anchors, 1)  # [N, M, 4]
-
-        # offset
-        lt = anchors_expand - gt_bboxes_expand[..., :2]
-        rb = gt_bboxes_expand[..., 2:] - anchors_expand
-        bbox_deltas = torch.cat([lt, rb], dim=-1)
-
-        is_in_gts = bbox_deltas.min(dim=-1).values > 0
-
-        return is_in_gts
-    
-    def dynamic_k_matching(self, cost_matrix, pairwise_ious, num_gt):
-        """Use IoU and matching cost to calculate the dynamic top-k positive
-        targets.
-
-        Args:
-            cost_matrix (Tensor): Cost matrix.
-            pairwise_ious (Tensor): Pairwise iou matrix.
-            num_gt (int): Number of gt.
-            valid_mask (Tensor): Mask for valid bboxes.
-        Returns:
-            tuple: matched ious and gt indexes.
-        """
-        matching_matrix = torch.zeros_like(cost_matrix, dtype=torch.uint8)
-        # select candidate topk ious for dynamic-k calculation
-        candidate_topk = min(self.topk_candidates, pairwise_ious.size(1))
-        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=1)
-        # calculate dynamic k for each gt
-        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
-
-        # sorting the batch cost matrix is faster than topk
-        _, sorted_indices = torch.sort(cost_matrix, dim=1)
-        for gt_idx in range(num_gt):
-            topk_ids = sorted_indices[gt_idx, :dynamic_ks[gt_idx]]
-            matching_matrix[gt_idx, :][topk_ids] = 1
-
-        del topk_ious, dynamic_ks, topk_ids
-
-        prior_match_gt_mask = matching_matrix.sum(0) > 1
-        if prior_match_gt_mask.sum() > 0:
-            cost_min, cost_argmin = torch.min(
-                cost_matrix[:, prior_match_gt_mask], dim=0)
-            matching_matrix[:, prior_match_gt_mask] *= 0
-            matching_matrix[cost_argmin, prior_match_gt_mask] = 1
-
-        # get foreground mask inside box and center prior
-        fg_mask_inboxes = matching_matrix.sum(0) > 0
-        matched_pred_ious = (matching_matrix *
-                             pairwise_ious).sum(0)[fg_mask_inboxes]
-        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
-
-        return matched_pred_ious, matched_gt_inds, fg_mask_inboxes
-    

+ 0 - 152
yolo/models/rtcdet/rtcdet.py

@@ -1,152 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-from .rtcdet_backbone import RTCBackbone
-from .rtcdet_neck     import SPPF
-from .rtcdet_pafpn    import RTCPaFPN
-from .rtcdet_head     import RTCDetHead
-from .rtcdet_pred     import RTCDetPredLayer
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# Real-time Convolutional Detector
-class RTCDet(nn.Module):
-    def __init__(self,
-                 cfg,
-                 is_val = False,
-                 ) -> None:
-        super(RTCDet, self).__init__()
-        # ---------------------- Basic setting ----------------------
-        self.cfg = cfg
-        self.num_classes = cfg.num_classes
-        ## Post-process parameters
-        self.topk_candidates  = cfg.val_topk        if is_val else cfg.test_topk
-        self.conf_thresh      = cfg.val_conf_thresh if is_val else cfg.test_conf_thresh
-        self.nms_thresh       = cfg.val_nms_thresh  if is_val else cfg.test_nms_thresh
-        self.no_multi_labels  = False if is_val else True
-        
-        # ---------------------- Network Parameters ----------------------
-        ## Backbone
-        self.backbone = RTCBackbone(cfg)
-        self.pyramid_feat_dims = self.backbone.feat_dims[-3:]
-        ## Neck
-        self.neck     = SPPF(cfg, self.pyramid_feat_dims[-1], self.pyramid_feat_dims[-1])
-        self.pyramid_feat_dims[-1] = self.neck.out_dim
-        ## Neck: PaFPN
-        self.fpn      = RTCPaFPN(cfg, self.backbone.feat_dims)
-        ## Head
-        self.head     = RTCDetHead(cfg, self.fpn.out_dims)
-        ## Pred
-        self.pred     = RTCDetPredLayer(cfg, self.head.cls_head_dim, self.head.reg_head_dim)
-
-    def post_process(self, cls_preds, box_preds):
-        """
-        We process predictions at each scale hierarchically
-        Input:
-            cls_preds: List[torch.Tensor] -> [[B, M, C], ...], B=1
-            box_preds: List[torch.Tensor] -> [[B, M, 4], ...], B=1
-        Output:
-            bboxes: np.array -> [N, 4]
-            scores: np.array -> [N,]
-            labels: np.array -> [N,]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            if self.no_multi_labels:
-                # [M,]
-                scores, labels = torch.max(cls_pred_i.sigmoid(), dim=1)
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # topk candidates
-                predicted_prob, topk_idxs = scores.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                labels = labels[topk_idxs]
-                bboxes = box_pred_i[topk_idxs]
-            else:
-                # [M, C] -> [MC,]
-                scores_i = cls_pred_i.sigmoid().flatten()
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # torch.sort is actually faster than .topk (at least on GPUs)
-                predicted_prob, topk_idxs = scores_i.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-                labels = topk_idxs % self.num_classes
-
-                bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores, dim=0)
-        labels = torch.cat(all_labels, dim=0)
-        bboxes = torch.cat(all_bboxes, dim=0)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes)
-        
-        return bboxes, scores, labels
-    
-    def forward(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Heads ----------------
-        cls_feats, reg_feats = self.head(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.pred(cls_feats, reg_feats)
-        outputs['image_size'] = [x.shape[2], x.shape[3]]
-
-        if not self.training:
-            all_cls_preds = outputs['pred_cls']
-            all_box_preds = outputs['pred_box']
-
-            # post process
-            bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
-            outputs = {
-                "scores": scores,
-                "labels": labels,
-                "bboxes": bboxes
-            }
-        
-        return outputs 

+ 0 - 141
yolo/models/rtcdet/rtcdet_backbone.py

@@ -1,141 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .rtcdet_basic import BasicConv, ELANLayer, MDown
-except ImportError:
-    from rtcdet_basic import BasicConv, ELANLayer, MDown
-
-
-# ---------------------------- Basic functions ----------------------------
-class RTCBackbone(nn.Module):
-    def __init__(self, cfg):
-        super(RTCBackbone, self).__init__()
-        # ------------------ Basic setting ------------------
-        self.model_scale = cfg.scale
-        self.num_blocks = cfg.num_blocks
-        self.feat_dims = [round(64  * cfg.channel_width),
-                          round(128 * cfg.channel_width),
-                          round(256 * cfg.channel_width),
-                          round(512 * cfg.channel_width),
-                          round(512 * cfg.channel_width * cfg.last_stage_ratio)]
-        
-        # ------------------ Network setting ------------------
-        ## P1/2
-        self.layer_1 = BasicConv(3, self.feat_dims[0],
-                                 kernel_size=6, padding=2, stride=2,
-                                 act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise)
-        # P2/4
-        self.layer_2 = nn.Sequential(
-            BasicConv(self.feat_dims[0], self.feat_dims[1],
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            ELANLayer(in_dim     = self.feat_dims[1],
-                      out_dim    = self.feat_dims[1],
-                      num_blocks = self.num_blocks[0],
-                      expansion  = 0.5,
-                      shortcut   = True,
-                      act_type   = cfg.bk_act,
-                      norm_type  = cfg.bk_norm,
-                      depthwise  = cfg.bk_depthwise)
-        )
-        # P3/8
-        self.layer_3 = nn.Sequential(
-            MDown(self.feat_dims[1], self.feat_dims[2],
-                  act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            ELANLayer(in_dim     = self.feat_dims[2],
-                      out_dim    = self.feat_dims[2],
-                      num_blocks = self.num_blocks[1],
-                      expansion  = 0.5,
-                      shortcut   = True,
-                      act_type   = cfg.bk_act,
-                      norm_type  = cfg.bk_norm,
-                      depthwise  = cfg.bk_depthwise)
-        )
-        # P4/16
-        self.layer_4 = nn.Sequential(
-            MDown(self.feat_dims[2], self.feat_dims[3],
-                  act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            ELANLayer(in_dim     = self.feat_dims[3],
-                      out_dim    = self.feat_dims[3],
-                      num_blocks = self.num_blocks[2],
-                      expansion  = 0.5,
-                      shortcut   = True,
-                      act_type   = cfg.bk_act,
-                      norm_type  = cfg.bk_norm,
-                      depthwise  = cfg.bk_depthwise)
-        )
-        # P5/32
-        self.layer_5 = nn.Sequential(
-            MDown(self.feat_dims[3], self.feat_dims[4],
-                  act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            ELANLayer(in_dim     = self.feat_dims[4],
-                      out_dim    = self.feat_dims[4],
-                      num_blocks = self.num_blocks[3],
-                      expansion  = 0.5,
-                      shortcut   = True,
-                      act_type   = cfg.bk_act,
-                      norm_type  = cfg.bk_norm,
-                      depthwise  = cfg.bk_depthwise)
-        )
-
-        # Initialize all layers
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-# ---------------------------- Functions ----------------------------
-## Build RTCDet's backbone
-def build_backbone(cfg): 
-    # model
-    backbone = RTCBackbone(cfg)
-        
-    return backbone
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    class BaseConfig(object):
-        def __init__(self) -> None:
-            self.bk_act = 'silu'
-            self.bk_norm = 'BN'
-            self.bk_depthwise = False
-            self.channel_width = 1.0
-            self.last_stage_ratio = 1.0
-            self.num_blocks = [3, 6, 6, 3]
-            self.scale = "n"
-
-    cfg = BaseConfig()
-    model = build_backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    x = torch.randn(1, 3, 640, 640)
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

+ 0 - 224
yolo/models/rtcdet/rtcdet_basic.py

@@ -1,224 +0,0 @@
-import torch
-import torch.nn as nn
-from typing import List
-
-
-# --------------------- Basic modules ---------------------
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-        
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-    elif norm_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-class BasicConv(nn.Module):
-    def __init__(self, 
-                 in_dim,                   # in channels
-                 out_dim,                  # out channels 
-                 kernel_size=1,            # kernel size 
-                 padding=0,                # padding
-                 stride=1,                 # stride
-                 dilation=1,               # dilation
-                 act_type  :str = 'lrelu', # activation
-                 norm_type :str = 'BN',    # normalization
-                 depthwise :bool = False
-                ):
-        super(BasicConv, self).__init__()
-        self.depthwise = depthwise
-        if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
-            self.norm = get_norm(norm_type, out_dim)
-        else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
-            self.norm1 = get_norm(norm_type, in_dim)
-            self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
-            self.norm2 = get_norm(norm_type, out_dim)
-        self.act  = get_activation(act_type)
-
-    def forward(self, x):
-        if not self.depthwise:
-            return self.act(self.norm(self.conv(x)))
-        else:
-            # Depthwise conv
-            x = self.act(self.norm1(self.conv1(x)))
-            # Pointwise conv
-            x = self.act(self.norm2(self.conv2(x)))
-            return x
-
-
-# --------------------- Yolov8 modules ---------------------
-class MDown(nn.Module):
-    def __init__(self,
-                 in_dim    :int,
-                 out_dim   :int,
-                 act_type  :str   = 'silu',
-                 norm_type :str   = 'BN',
-                 depthwise :bool  = False,
-                 ) -> None:
-        super().__init__()
-        inter_dim = out_dim // 2
-        self.downsample_1 = nn.Sequential(
-            nn.MaxPool2d((2, 2), stride=2),
-            BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        )
-        self.downsample_2 = nn.Sequential(
-            BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type),
-            BasicConv(inter_dim, inter_dim,
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-        if in_dim == out_dim:
-            self.output_proj = nn.Identity()
-        else:
-            self.output_proj = BasicConv(inter_dim * 2, out_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-
-    def forward(self, x):
-        x1 = self.downsample_1(x)
-        x2 = self.downsample_2(x)
-
-        out = self.output_proj(torch.cat([x1, x2], dim=1))
-
-        return out
-
-class Bottleneck(nn.Module):
-    def __init__(self,
-                 in_dim      :int,
-                 out_dim     :int,
-                 kernel_size :List  = [1, 3],
-                 expansion   :float = 0.5,
-                 shortcut    :bool  = False,
-                 act_type    :str   = 'silu',
-                 norm_type   :str   = 'BN',
-                 depthwise   :bool  = False,
-                 ) -> None:
-        super(Bottleneck, self).__init__()
-        inter_dim = int(out_dim * expansion)
-        # ----------------- Network setting -----------------
-        self.conv_layer1 = BasicConv(in_dim, inter_dim,
-                                     kernel_size=kernel_size[0], padding=kernel_size[0]//2, stride=1,
-                                     act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        self.conv_layer2 = BasicConv(inter_dim, out_dim,
-                                     kernel_size=kernel_size[1], padding=kernel_size[1]//2, stride=1,
-                                     act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        self.shortcut = shortcut and in_dim == out_dim
-
-    def forward(self, x):
-        h = self.conv_layer2(self.conv_layer1(x))
-
-        return x + h if self.shortcut else h
-
-class ELANLayer(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expansion  :float = 0.5,
-                 num_blocks :int   = 1,
-                 shortcut   :bool  = False,
-                 act_type   :str   = 'silu',
-                 norm_type  :str   = 'BN',
-                 depthwise  :bool  = False,
-                 ) -> None:
-        super(ELANLayer, self).__init__()
-        inter_dim = round(out_dim * expansion)
-        self.input_proj  = BasicConv(in_dim, inter_dim * 2, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.output_proj = BasicConv((2 + num_blocks) * inter_dim, out_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.module      = nn.ModuleList([Bottleneck(inter_dim,
-                                                     inter_dim,
-                                                     kernel_size = [3, 3],
-                                                     expansion   = 1.0,
-                                                     shortcut    = shortcut,
-                                                     act_type    = act_type,
-                                                     norm_type   = norm_type,
-                                                     depthwise   = depthwise)
-                                                     for _ in range(num_blocks)])
-
-    def forward(self, x):
-        # Input proj
-        x1, x2 = torch.chunk(self.input_proj(x), 2, dim=1)
-        out = list([x1, x2])
-
-        # Bottleneck blocks
-        out.extend(m(out[-1]) for m in self.module)
-
-        # Output proj
-        out = self.output_proj(torch.cat(out, dim=1))
-
-        return out
-
-class ELANLayerFPN(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 num_blocks :int   = 1,
-                 expansion  :float = 0.5,
-                 act_type   :str   = 'silu',
-                 norm_type  :str   = 'BN',
-                 depthwise  :bool  = False,
-                 ) -> None:
-        super(ELANLayerFPN, self).__init__()
-        inter_dim_1 = round(out_dim * expansion)
-        inter_dim_2 = round(inter_dim_1 * expansion)
-        # Branch-1
-        self.branch_1 = BasicConv(in_dim, inter_dim_1, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        # Branch-2
-        self.branch_2 = BasicConv(in_dim, inter_dim_1, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        # Branch-3
-        branch_3 = []
-        for i in range(num_blocks):
-            if i == 0:
-                branch_3.append(BasicConv(inter_dim_1, inter_dim_2, kernel_size=3, padding=1,
-                                          act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-            else:
-                branch_3.append(BasicConv(inter_dim_2, inter_dim_2, kernel_size=3, padding=1,
-                                          act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-        self.branch_3 = nn.Sequential(*branch_3)
-        # Branch-4
-        self.branch_4 = nn.Sequential(*[BasicConv(inter_dim_2, inter_dim_2, kernel_size=3, padding=1,
-                                                  act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-                                                     for _ in range(num_blocks)])
-        # Branch-5
-        self.branch_5 = nn.Sequential(*[BasicConv(inter_dim_2, inter_dim_2, kernel_size=3, padding=1,
-                                                  act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-                                                     for _ in range(num_blocks)])
-        # Branch-6
-        self.branch_6 = nn.Sequential(*[BasicConv(inter_dim_2, inter_dim_2, kernel_size=3, padding=1,
-                                                  act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-                                                     for _ in range(num_blocks)])
-        self.output_proj = BasicConv(2*inter_dim_1 + 4*inter_dim_2, out_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-
-    def forward(self, x):
-        # Elan
-        x1 = self.branch_1(x)
-        x2 = self.branch_2(x)
-        x3 = self.branch_3(x2)
-        x4 = self.branch_4(x3)
-        x5 = self.branch_5(x4)
-        x6 = self.branch_6(x5)
-
-        # Output proj
-        out = list([x1, x2, x3, x4, x5, x6])
-        out = self.output_proj(torch.cat(out, dim=1))
-
-        return out

+ 0 - 126
yolo/models/rtcdet/rtcdet_head.py

@@ -1,126 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .rtcdet_basic import BasicConv
-
-
-# -------------------- Detection Head --------------------
-## Single-level Detection Head
-class DetHead(nn.Module):
-    def __init__(self,
-                 in_dim       :int  = 256,
-                 cls_head_dim :int  = 256,
-                 reg_head_dim :int  = 256,
-                 num_cls_head :int  = 2,
-                 num_reg_head :int  = 2,
-                 act_type     :str  = "silu",
-                 norm_type    :str  = "BN",
-                 depthwise    :bool = False):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.depthwise = depthwise
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = cls_head_dim
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    BasicConv(in_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-            else:
-                cls_feats.append(
-                    BasicConv(self.cls_head_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = reg_head_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    BasicConv(in_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-            else:
-                reg_feats.append(
-                    BasicConv(self.reg_head_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-## Multi-level Detection Head
-class RTCDetHead(nn.Module):
-    def __init__(self, cfg, in_dims):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [DetHead(in_dim       = in_dims[level],
-                     cls_head_dim = max(in_dims[0], min(cfg.num_classes, 128)),
-                     reg_head_dim = max(in_dims[0]//4, 16, 4*cfg.reg_max),
-                     num_cls_head = cfg.num_cls_head,
-                     num_reg_head = cfg.num_reg_head,
-                     act_type     = cfg.head_act,
-                     norm_type    = cfg.head_norm,
-                     depthwise    = cfg.head_depthwise)
-                     for level in range(cfg.num_levels)
-                     ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
-        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
-
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        return cls_feats, reg_feats

+ 0 - 33
yolo/models/rtcdet/rtcdet_neck.py

@@ -1,33 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .rtcdet_basic import BasicConv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code is adapted from https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim):
-        super().__init__()
-        ## ----------- Basic Parameters -----------
-        inter_dim = round(in_dim * cfg.neck_expand_ratio)
-        self.out_dim = out_dim
-        ## ----------- Network Parameters -----------
-        self.cv1 = BasicConv(in_dim, inter_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.cv2 = BasicConv(inter_dim * 4, out_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.m = nn.MaxPool2d(kernel_size=cfg.spp_pooling_size,
-                              stride=1,
-                              padding=cfg.spp_pooling_size // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))

+ 0 - 98
yolo/models/rtcdet/rtcdet_pafpn.py

@@ -1,98 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from typing import List
-
-from .rtcdet_basic import ELANLayerFPN, MDown, BasicConv
-
-
-# Modified YOLOv8's PaFPN
-class RTCPaFPN(nn.Module):
-    def __init__(self,
-                 cfg,
-                 in_dims :List = [256, 512, 1024],
-                 ) -> None:
-        super(RTCPaFPN, self).__init__()
-        print('==============================')
-        print('FPN: {}'.format("RTC-PaFPN"))
-        # --------------------------- Basic Parameters ---------------------------
-        self.in_dims = in_dims[::-1]
-        self.out_dims = [round(256*cfg.channel_width), round(512*cfg.channel_width), round(1024*cfg.channel_width)]
-
-        # ----------------------------- Yolov8's Top-down FPN -----------------------------
-        ## P5 -> P4
-        self.top_down_layer_1 = ELANLayerFPN(in_dim     = self.in_dims[0] + self.in_dims[1],
-                                             out_dim    = round(512*cfg.channel_width),
-                                             expansion  = 0.5,
-                                             num_blocks = cfg.fpn_num_blocks,
-                                             act_type   = cfg.fpn_act,
-                                             norm_type  = cfg.fpn_norm,
-                                             depthwise  = cfg.fpn_depthwise,
-                                             )
-        ## P4 -> P3
-        self.top_down_layer_2 = ELANLayerFPN(in_dim     = self.in_dims[2] + round(512*cfg.channel_width),
-                                             out_dim    = round(256*cfg.channel_width),
-                                             expansion  = 0.5,
-                                             num_blocks = cfg.fpn_num_blocks,
-                                             act_type   = cfg.fpn_act,
-                                             norm_type  = cfg.fpn_norm,
-                                             depthwise  = cfg.fpn_depthwise,
-                                             )
-        # ----------------------------- Yolov8's Bottom-up PAN -----------------------------
-        ## P3 -> P4
-        self.dowmsample_layer_1 = MDown(round(256*cfg.channel_width), round(256*cfg.channel_width),
-                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-        self.bottom_up_layer_1  = ELANLayerFPN(in_dim     = round(256*cfg.channel_width) + round(512*cfg.channel_width),
-                                               out_dim    = round(512*cfg.channel_width),
-                                               expansion  = 0.5,
-                                               num_blocks = cfg.fpn_num_blocks,
-                                               act_type   = cfg.fpn_act,
-                                               norm_type  = cfg.fpn_norm,
-                                               depthwise  = cfg.fpn_depthwise,
-                                               )
-        ## P4 -> P5
-        self.dowmsample_layer_2 = MDown(round(512*cfg.channel_width), round(512*cfg.channel_width),
-                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-        self.bottom_up_layer_2  = ELANLayerFPN(in_dim     = round(512*cfg.channel_width) + self.in_dims[0],
-                                               out_dim    = round(1024*cfg.channel_width),
-                                               expansion  = 0.5,
-                                               num_blocks = cfg.fpn_num_blocks,
-                                               act_type   = cfg.fpn_act,
-                                               norm_type  = cfg.fpn_norm,
-                                               depthwise  = cfg.fpn_depthwise,
-                                               )
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, features):
-        c3, c4, c5 = features
-
-        # ------------------ Top down FPN ------------------
-        ## P5 -> P4
-        p5_up = F.interpolate(c5, scale_factor=2.0)
-        p4 = self.top_down_layer_1(torch.cat([p5_up, c4], dim=1))
-
-        ## P4 -> P3
-        p4_up = F.interpolate(p4, scale_factor=2.0)
-        p3 = self.top_down_layer_2(torch.cat([p4_up, c3], dim=1))
-
-        # ------------------ Bottom up FPN ------------------
-        ## P3 -> P4
-        p3_ds = self.dowmsample_layer_1(p3)
-        p4 = self.bottom_up_layer_1(torch.cat([p3_ds, p4], dim=1))
-
-        ## P4 -> P5
-        p4_ds = self.dowmsample_layer_2(p4)
-        p5 = self.bottom_up_layer_2(torch.cat([p4_ds, c5], dim=1))
-
-        out_feats = [p3, p4, p5] # [P3, P4, P5]
-                
-        return out_feats

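For reference, a minimal sketch (not part of the commit) of the top-down / bottom-up fusion pattern the removed RTCPaFPN implements. Plain 1x1 and stride-2 convs stand in for ELANLayerFPN and MDown, and the channel widths assume channel_width = 1.0 with a 640x640 input; all of these are illustrative assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F

# dummy backbone features at strides 8 / 16 / 32
c3, c4, c5 = torch.randn(1, 256, 80, 80), torch.randn(1, 512, 40, 40), torch.randn(1, 1024, 20, 20)

# top-down FPN: upsample deeper features and fuse with shallower ones
td1 = nn.Conv2d(1024 + 512, 512, 1)                 # stands in for top_down_layer_1
td2 = nn.Conv2d(512 + 256, 256, 1)                  # stands in for top_down_layer_2
p4 = td1(torch.cat([F.interpolate(c5, scale_factor=2.0), c4], dim=1))
p3 = td2(torch.cat([F.interpolate(p4, scale_factor=2.0), c3], dim=1))

# bottom-up PAN: downsample and fuse back toward the deep levels
ds1 = nn.Conv2d(256, 256, 3, stride=2, padding=1)   # stands in for the first downsample layer
bu1 = nn.Conv2d(256 + 512, 512, 1)                  # stands in for bottom_up_layer_1
ds2 = nn.Conv2d(512, 512, 3, stride=2, padding=1)   # stands in for the second downsample layer
bu2 = nn.Conv2d(512 + 1024, 1024, 1)                # stands in for bottom_up_layer_2
p4 = bu1(torch.cat([ds1(p3), p4], dim=1))
p5 = bu2(torch.cat([ds2(p4), c5], dim=1))

print([t.shape for t in (p3, p4, p5)])              # outputs at strides 8 / 16 / 32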
+ 0 - 153
yolo/models/rtcdet/rtcdet_pred.py

@@ -1,153 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-# -------------------- Detection Pred Layer --------------------
-## Single-level pred layer
-class DetPredLayer(nn.Module):
-    def __init__(self,
-                 cls_dim     :int = 256,
-                 reg_dim     :int = 256,
-                 stride      :int = 32,
-                 reg_max     :int = 16,
-                 num_classes :int = 80,
-                 num_coords  :int = 4):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.stride = stride
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.reg_max = reg_max
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-
-        # --------- Network Parameters ----------
-        self.cls_pred = nn.Conv2d(cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(reg_dim, num_coords, kernel_size=1)                
-
-        self.init_bias()
-        
-    def init_bias(self):
-        # cls pred bias
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(math.log(5 / self.num_classes / (640. / self.stride) ** 2))
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred bias
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-    def generate_anchors(self, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        # generate grid cells
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors += 0.5  # add center offset
-        anchors *= self.stride
-
-        return anchors
-        
-    def forward(self, cls_feat, reg_feat):
-        # pred
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        # generate anchor boxes: [M, 4]
-        B, _, H, W = cls_pred.size()
-        fmp_size = [H, W]
-        anchors = self.generate_anchors(fmp_size)
-        anchors = anchors.to(cls_pred.device)
-        # stride tensor: [M, 1]
-        stride_tensor = torch.ones_like(anchors[..., :1]) * self.stride
-        
-        # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
-        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4*self.reg_max)
-        
-        # output dict
-        outputs = {"pred_cls": cls_pred,            # Tensor [B, M, C]
-                   "pred_reg": reg_pred,            # Tensor [B, M, 4*reg_max]
-                   "anchors": anchors,              # Tensor [M, 2]
-                   "strides": self.stride,          # Int, one of {8, 16, 32}
-                   "stride_tensor": stride_tensor   # Tensor [M, 1]
-                   }
-
-        return outputs
-
-## Multi-level pred layer
-class RTCDetPredLayer(nn.Module):
-    def __init__(self,
-                 cfg,
-                 cls_dim,
-                 reg_dim,
-                 ):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cfg = cfg
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-
-        # ----------- Network Parameters -----------
-        ## pred layers
-        self.multi_level_preds = nn.ModuleList(
-            [DetPredLayer(cls_dim     = cls_dim,
-                          reg_dim     = reg_dim,
-                          stride      = cfg.out_stride[level],
-                          reg_max     = cfg.reg_max,
-                          num_classes = cfg.num_classes,
-                          num_coords  = 4 * cfg.reg_max)
-                          for level in range(cfg.num_levels)
-                          ])
-        ## proj conv
-        proj_init = torch.arange(cfg.reg_max, dtype=torch.float)
-        self.proj_conv = nn.Conv2d(cfg.reg_max, 1, kernel_size=1, bias=False).requires_grad_(False)
-        self.proj_conv.weight.data[:] = nn.Parameter(proj_init.view([1, cfg.reg_max, 1, 1]), requires_grad=False)
-
-    def forward(self, cls_feats, reg_feats):
-        all_anchors = []
-        all_strides = []
-        all_cls_preds = []
-        all_reg_preds = []
-        all_box_preds = []
-        for level in range(self.cfg.num_levels):
-            # -------------- Single-level prediction --------------
-            outputs = self.multi_level_preds[level](cls_feats[level], reg_feats[level])
-
-            # -------------- Decode bbox --------------
-            B, M = outputs["pred_reg"].shape[:2]
-            # [B, M, 4*(reg_max)] -> [B, M, 4, reg_max]
-            delta_pred = outputs["pred_reg"].reshape([B, M, 4, self.cfg.reg_max])
-            # [B, M, 4, reg_max] -> [B, reg_max, 4, M]
-            delta_pred = delta_pred.permute(0, 3, 2, 1).contiguous()
-            # [B, reg_max, 4, M] -> [B, 1, 4, M]
-            delta_pred = self.proj_conv(F.softmax(delta_pred, dim=1))
-            # [B, 1, 4, M] -> [B, 4, M] -> [B, M, 4]
-            delta_pred = delta_pred.view(B, 4, M).permute(0, 2, 1).contiguous()
-            ## tlbr -> xyxy
-            x1y1_pred = outputs["anchors"][None] - delta_pred[..., :2] * self.cfg.out_stride[level]
-            x2y2_pred = outputs["anchors"][None] + delta_pred[..., 2:] * self.cfg.out_stride[level]
-            box_pred = torch.cat([x1y1_pred, x2y2_pred], dim=-1)
-
-            # collect results
-            all_cls_preds.append(outputs["pred_cls"])
-            all_reg_preds.append(outputs["pred_reg"])
-            all_box_preds.append(box_pred)
-            all_anchors.append(outputs["anchors"])
-            all_strides.append(outputs["stride_tensor"])
-        
-        # output dict
-        outputs = {"pred_cls":      all_cls_preds,         # List(Tensor) [B, M, C]
-                   "pred_reg":      all_reg_preds,         # List(Tensor) [B, M, 4*(reg_max)]
-                   "pred_box":      all_box_preds,         # List(Tensor) [B, M, 4]
-                   "anchors":       all_anchors,           # List(Tensor) [M, 2]
-                   "stride_tensor": all_strides,           # List(Tensor) [M, 1]
-                   "strides":       self.cfg.out_stride,   # List(Int) = [8, 16, 32]
-                   }
-
-        return outputs
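
The decoding step of the removed RTCDetPredLayer turns each side's reg_max-bin distribution into an expected offset and expands it around the anchor center. A minimal sketch (not part of the commit) of that distribution-based box decoding; the direct expectation below is an equivalent simplification of the frozen 1x1 proj_conv, and the batch size, anchor count, stride, and reg_max are illustrative assumptions:

import torch
import torch.nn.functional as F

B, M, reg_max, stride = 2, 400, 16, 16
reg_pred = torch.randn(B, M, 4 * reg_max)          # raw regression logits, [B, M, 4*reg_max]
anchors = torch.rand(M, 2) * 640                   # anchor centers in image coordinates

# softmax over the reg_max bins, then take the expectation with proj = [0, 1, ..., reg_max-1]
proj = torch.arange(reg_max, dtype=torch.float)
dist = F.softmax(reg_pred.view(B, M, 4, reg_max), dim=-1)
delta = (dist * proj).sum(dim=-1)                  # [B, M, 4] expected l, t, r, b in stride units

# ltrb offsets -> xyxy boxes around the anchor centers
x1y1 = anchors[None] - delta[..., :2] * stride
x2y2 = anchors[None] + delta[..., 2:] * stride
boxes = torch.cat([x1y1, x2y2], dim=-1)            # [B, M, 4]
print(boxes.shape)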