yjh0410 1 year ago
parent
commit
4ecf2dc8ff

+ 4 - 1
config/__init__.py

@@ -1,5 +1,6 @@
 # ------------------ Model Config ------------------
 from .yolov1_config   import build_yolov1_config
+from .yolov2_config   import build_yolov2_config
 from .yolov8_config   import build_yolov8_config
 from .rtdetr_config import build_rtdetr_config
 
@@ -7,8 +8,10 @@ def build_config(args):
     print('==============================')
     print('Model: {} ...'.format(args.model.upper()))
     # YOLOv8
-    if 'yolov1' in args.model:
+    if   'yolov1' in args.model:
         cfg = build_yolov1_config(args)
+    elif 'yolov2' in args.model:
+        cfg = build_yolov2_config(args)
     elif 'yolov8' in args.model:
         cfg = build_yolov8_config(args)
     # RT-DETR
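
A minimal usage sketch (not part of the commit): `build_config` dispatches on a substring of `args.model`, so any object with a `model` attribute works; the `SimpleNamespace` stand-in for the argparse result is an assumption.

```python
from types import SimpleNamespace
from config import build_config

# Hypothetical stand-in for the argparse.Namespace that train.py passes.
args = SimpleNamespace(model='yolov2_r18')
cfg = build_config(args)   # prints the model banner, returns Yolov2R18Config
print(cfg.backbone)        # 'resnet18'
```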

+ 99 - 0
config/yolov2_config.py

@@ -0,0 +1,99 @@
+# YOLOv2 config
+
+
+def build_yolov2_config(args):
+    if args.model == 'yolov2_r18':
+        return Yolov2R18Config()
+    else:
+        raise NotImplementedError("No config for model: {}".format(args.model))
+    
+# YOLOv2-Base config
+class Yolov2BaseConfig(object):
+    def __init__(self) -> None:
+        # ---------------- Model config ----------------
+        self.out_stride = 32
+        self.max_stride = 32
+        ## Backbone
+        self.backbone       = 'resnet50'
+        self.use_pretrained = True
+        ## Neck
+        self.neck_act       = 'lrelu'
+        self.neck_norm      = 'BN'
+        self.neck_depthwise = False
+        self.neck_expand_ratio = 0.5
+        self.spp_pooling_size  = 5
+        ## Head
+        self.head_act  = 'lrelu'
+        self.head_norm = 'BN'
+        self.head_depthwise = False
+        self.head_dim  = 512
+        self.num_cls_head = 2
+        self.num_reg_head = 2
+        self.anchor_sizes = [[17, 25], [55, 75], [92, 206], [202, 21], [289, 311]]
+
+        # ---------------- Post-process config ----------------
+        ## Post process
+        self.val_topk = 1000
+        self.val_conf_thresh = 0.001
+        self.val_nms_thresh  = 0.7
+        self.test_topk = 100
+        self.test_conf_thresh = 0.2
+        self.test_nms_thresh  = 0.5
+
+        # ---------------- Assignment config ----------------
+        ## Matcher
+        self.iou_thresh = 0.5
+        ## Loss weight
+        self.loss_obj = 1.0
+        self.loss_cls = 1.0
+        self.loss_box = 5.0
+
+        # ---------------- ModelEMA config ----------------
+        self.use_ema   = True
+        self.ema_decay = 0.9998
+        self.ema_tau   = 2000
+
+        # ---------------- Optimizer config ----------------
+        self.trainer      = 'yolo'
+        self.optimizer    = 'adamw'
+        self.per_image_lr = 0.001 / 64
+        self.base_lr      = None      # base_lr = per_image_lr * batch_size
+        self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
+        self.momentum     = 0.9
+        self.weight_decay = 0.05
+        self.clip_max_norm   = -1.
+        self.warmup_bias_lr  = 0.1
+        self.warmup_momentum = 0.8
+
+        # ---------------- Lr Scheduler config ----------------
+        self.warmup_epoch = 3
+        self.lr_scheduler = "cosine"
+        self.max_epoch    = 150
+        self.eval_epoch   = 10
+        self.no_aug_epoch = 20
+
+        # ---------------- Data process config ----------------
+        self.aug_type = 'ssd'
+        self.box_format = 'xyxy'
+        self.normalize_coords = False
+        self.mosaic_prob = 0.0
+        self.mixup_prob  = 0.0
+        self.copy_paste  = 0.0          # approximated by YOLOX-style mixup
+        self.multi_scale = [0.5, 1.25]   # multi-scale range: [img_size * 0.5, img_size * 1.25]
+        ## Pixel mean & std
+        self.pixel_mean = [123.675, 116.28, 103.53]   # RGB format
+        self.pixel_std  = [58.395, 57.12, 57.375]     # RGB format
+        ## Transforms
+        self.train_img_size = 640
+        self.test_img_size  = 640
+
+    def print_config(self):
+        config_dict = {key: value for key, value in self.__dict__.items() if not key.startswith('__')}
+        for k, v in config_dict.items():
+            print("{} : {}".format(k, v))
+
+# YOLOv2-R18
+class Yolov2R18Config(Yolov2BaseConfig):
+    def __init__(self) -> None:
+        super().__init__()
+        self.backbone = 'resnet18'
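
The optimizer block encodes the linear LR scaling rule spelled out in its comments (`base_lr = per_image_lr * batch_size`, `min_lr = base_lr * min_lr_ratio`). A short sketch of how a trainer might resolve these values; the `resolve_lr` helper is hypothetical, and batch size 16 matches the 1xb16 setting in the README below.

```python
from config.yolov2_config import Yolov2R18Config

def resolve_lr(cfg, batch_size):
    # Linear scaling rule from the config comments.
    base_lr = cfg.per_image_lr * batch_size
    min_lr  = base_lr * cfg.min_lr_ratio
    return base_lr, min_lr

cfg = Yolov2R18Config()
cfg.print_config()                      # dump every config field
print(resolve_lr(cfg, batch_size=16))   # -> (0.00025, 2.5e-06)
```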

+ 7 - 2
models/__init__.py

@@ -3,15 +3,20 @@
 
 import torch
 from .yolov1.build import build_yolov1
+from .yolov2.build import build_yolov2
 from .yolov8.build import build_yolov8
 from .rtdetr.build import build_rtdetr
 
 # build object detector
 def build_model(args, cfg, is_val=False):
     # ------------ build object detector ------------
-    ## YOLOv8
-    if 'yolov1' in args.model:
+    ## Modified YOLOv1
+    if   'yolov1' in args.model:
         model, criterion = build_yolov1(cfg, is_val)
+    ## Modified YOLOv2
+    elif 'yolov2' in args.model:
+        model, criterion = build_yolov2(cfg, is_val)
+    ## YOLOv8
     elif 'yolov8' in args.model:
         model, criterion = build_yolov8(cfg, is_val)
     ## RT-DETR

+ 2 - 2
models/yolov1/loss.py

@@ -1,6 +1,6 @@
 import torch
 import torch.nn.functional as F
-from .matcher import YoloMatcher
+from .matcher import Yolov1Matcher
 from utils.box_ops import get_ious
 from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
 
@@ -14,7 +14,7 @@ class SetCriterion(object):
         self.loss_box_weight = cfg.loss_box
 
         # matcher
-        self.matcher = YoloMatcher(cfg.num_classes)
+        self.matcher = Yolov1Matcher(cfg.num_classes)
 
     def loss_objectness(self, pred_obj, gt_obj):
         loss_obj = F.binary_cross_entropy_with_logits(pred_obj, gt_obj, reduction='none')

+ 1 - 1
models/yolov1/matcher.py

@@ -2,7 +2,7 @@ import torch
 import numpy as np
 
 
-class YoloMatcher(object):
+class Yolov1Matcher(object):
     def __init__(self, num_classes):
         self.num_classes = num_classes
 

+ 60 - 0
models/yolov2/README.md

@@ -0,0 +1,60 @@
+# Redesigned YOLOv2
+
+- VOC
+
+| Model  |  Backbone  | Batch | Scale | AP<sup>val<br>0.5 | Weight |
+|--------|------------|-------|-------|-------------------|--------|
+| YOLOv2 | ResNet-18  | 1xb16 |  640  |                   |  |
+
+- COCO
+
+| Model  |  Backbone  | Batch | Scale | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |
+|--------|------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|
+| YOLOv2 | ResNet-18  | 1xb16 |  640  |                    |               |   38.0            |   21.5             | [ckpt](https://github.com/yjh0410/RT-ODLab/releases/download/yolo_tutorial_ckpt/yolov2_coco.pth) |
+
+- For training, we train the redesigned YOLOv2 for 150 epochs on COCO, with gradient accumulation.
+- For data augmentation, we only use large scale jitter (LSJ); no Mosaic or Mixup augmentation.
+- For the optimizer, we use AdamW with weight decay 0.05 and a per-image lr of 0.001 / 64 (base lr scales linearly with batch size).
+- For the learning rate scheduler, we use cosine decay with a 3-epoch warmup.
+
+
+## Train YOLOv2
+### Single GPU
+Taking training YOLOv2 on COCO as the example,
+```Shell
+python train.py --cuda -d coco --root path/to/coco -m yolov2 -bs 16 -size 640 --wp_epoch 3 --max_epoch 150 --eval_epoch 10 --no_aug_epoch 10 --ema --fp16 --multi_scale 
+```
+
+### Multi GPU
+Taking training YOLOv2 on COCO as the example,
+```Shell
+python -m torch.distributed.run --nproc_per_node=8 train.py --cuda -dist -d coco --root /data/datasets/ -m yolov2 -bs 128 -size 640 --wp_epoch 3 --max_epoch 150  --eval_epoch 10 --no_aug_epoch 20 --ema --fp16 --sybn --multi_scale --save_folder weights/ 
+```
+
+## Test YOLOv2
+Taking testing YOLOv2 on COCO-val as the example,
+```Shell
+python test.py --cuda -d coco --root path/to/coco -m yolov2 --weight path/to/yolov2.pth -size 640 -vt 0.3 --show 
+```
+
+## Evaluate YOLOv2
+Taking evaluating YOLOv2 on COCO-val as the example,
+```Shell
+python eval.py --cuda -d coco-val --root path/to/coco -m yolov2 --weight path/to/yolov2.pth 
+```
+
+## Demo
+### Detect with Image
+```Shell
+python demo.py --mode image --path_to_img path/to/image_dirs/ --cuda -m yolov2 --weight path/to/weight -size 640 -vt 0.3 --show
+```
+
+### Detect with Video
+```Shell
+python demo.py --mode video --path_to_vid path/to/video --cuda -m yolov2 --weight path/to/weight -size 640 -vt 0.3 --show --gif
+```
+
+### Detect with Camera
+```Shell
+python demo.py --mode camera --cuda -m yolov2 --weight path/to/weight -size 640 -vt 0.3 --show --gif
+```

+ 16 - 0
models/yolov2/build.py

@@ -0,0 +1,16 @@
+from .loss import SetCriterion
+from .yolov2 import Yolov2
+
+
+# build object detector
+def build_yolov2(cfg, is_val=False):
+    # -------------- Build YOLO --------------
+    model = Yolov2(cfg, is_val)
+  
+    # -------------- Build criterion --------------
+    criterion = None
+    if is_val:
+        # build criterion for training
+        criterion = SetCriterion(cfg)
+        
+    return model, criterion

+ 98 - 0
models/yolov2/loss.py

@@ -0,0 +1,98 @@
+import torch
+import torch.nn.functional as F
+from .matcher import Yolov2Matcher
+from utils.box_ops import get_ious
+from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
+
+
+class SetCriterion(object):
+    def __init__(self, cfg):
+        self.cfg = cfg
+        self.num_classes = cfg.num_classes
+        self.loss_obj_weight = cfg.loss_obj
+        self.loss_cls_weight = cfg.loss_cls
+        self.loss_box_weight = cfg.loss_box
+
+        # matcher
+        self.matcher = Yolov2Matcher(cfg.iou_thresh, cfg.num_classes, cfg.anchor_sizes)
+
+    def loss_objectness(self, pred_obj, gt_obj):
+        loss_obj = F.binary_cross_entropy_with_logits(pred_obj, gt_obj, reduction='none')
+
+        return loss_obj
+    
+    def loss_classes(self, pred_cls, gt_label):
+        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_label, reduction='none')
+
+        return loss_cls
+
+    def loss_bboxes(self, pred_box, gt_box):
+        # regression loss
+        ious = get_ious(pred_box,
+                        gt_box,
+                        box_mode="xyxy",
+                        iou_type='giou')
+        loss_box = 1.0 - ious
+
+        return loss_box
+
+    def __call__(self, outputs, targets):
+        device = outputs['pred_cls'][0].device
+        stride = outputs['stride']
+        fmp_size = outputs['fmp_size']
+        (
+            gt_objectness, 
+            gt_classes, 
+            gt_bboxes,
+            ) = self.matcher(fmp_size=fmp_size, 
+                             stride=stride, 
+                             targets=targets)
+        # Flatten the predictions: [B, M, C] -> [BM, C]
+        pred_obj = outputs['pred_obj'].view(-1)        # [B, M, 1] -> [BM,]
+        pred_cls = outputs['pred_cls'].flatten(0, 1)   # [B, M, C] -> [BM, C]
+        pred_box = outputs['pred_box'].flatten(0, 1)   # [B, M, 4] -> [BM, 4]
+       
+        gt_objectness = gt_objectness.view(-1).to(device).float()               # [BM,]
+        gt_classes = gt_classes.view(-1, self.num_classes).to(device).float()   # [BM, C]
+        gt_bboxes = gt_bboxes.view(-1, 4).to(device).float()                    # [BM, 4]
+
+        pos_masks = (gt_objectness > 0)
+        num_fgs = pos_masks.sum()
+
+        if is_dist_avail_and_initialized():
+            torch.distributed.all_reduce(num_fgs)
+        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
+
+        # obj loss
+        loss_obj = self.loss_objectness(pred_obj, gt_objectness)
+        loss_obj = loss_obj.sum() / num_fgs
+
+        # cls loss
+        pred_cls_pos = pred_cls[pos_masks]
+        gt_classes_pos = gt_classes[pos_masks]
+        loss_cls = self.loss_classes(pred_cls_pos, gt_classes_pos)
+        loss_cls = loss_cls.sum() / num_fgs
+
+        # box loss
+        pred_box_pos = pred_box[pos_masks]
+        gt_bboxes_pos = gt_bboxes[pos_masks]
+        loss_box = self.loss_bboxes(pred_box_pos, gt_bboxes_pos)
+        loss_box = loss_box.sum() / num_fgs
+        
+        # total loss
+        losses = self.loss_obj_weight * loss_obj + \
+                 self.loss_cls_weight * loss_cls + \
+                 self.loss_box_weight * loss_box
+
+        loss_dict = dict(
+                loss_obj = loss_obj,
+                loss_cls = loss_cls,
+                loss_box = loss_box,
+                losses = losses
+        )
+
+        return loss_dict
+    
+    
+if __name__ == "__main__":
+    pass
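
The criterion sums each per-element loss and divides by the foreground count, clamped at 1 and averaged across GPUs. A single-GPU sketch of that normalization on dummy tensors (shapes are hypothetical; the all_reduce is a no-op here):

```python
import torch
import torch.nn.functional as F

# Dummy flattened predictions: BM = 8 anchor positions, 2 foreground.
pred_obj = torch.randn(8)
gt_obj   = torch.tensor([1., 0., 0., 1., 0., 0., 0., 0.])

# Clamp the foreground count at 1 to avoid dividing by zero on
# images that contain no objects.
num_fgs  = (gt_obj > 0).sum().float().clamp(1.0)
loss_obj = F.binary_cross_entropy_with_logits(pred_obj, gt_obj, reduction='none')
print(loss_obj.sum() / num_fgs)   # scalar objectness loss
```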

+ 139 - 0
models/yolov2/matcher.py

@@ -0,0 +1,139 @@
+import torch
+import numpy as np
+
+
+class Yolov2Matcher(object):
+    def __init__(self, iou_thresh, num_classes, anchor_size):
+        self.num_classes = num_classes
+        self.iou_thresh = iou_thresh
+        # anchor box
+        self.num_anchors = len(anchor_size)
+        self.anchor_size = anchor_size
+        self.anchor_boxes = np.array(
+            [[0., 0., anchor[0], anchor[1]]
+            for anchor in anchor_size]
+            )  # [KA, 4]
+
+
+    def compute_iou(self, anchor_boxes, gt_box):
+        """
+            anchor_boxes : ndarray -> [KA, 4] (cx, cy, bw, bh).
+            gt_box : ndarray -> [1, 4] (cx, cy, bw, bh).
+        """
+        # anchors: [KA, 4]
+        anchors = np.zeros_like(anchor_boxes)
+        anchors[..., :2] = anchor_boxes[..., :2] - anchor_boxes[..., 2:] * 0.5  # x1y1
+        anchors[..., 2:] = anchor_boxes[..., :2] + anchor_boxes[..., 2:] * 0.5  # x2y2
+        anchors_area = anchor_boxes[..., 2] * anchor_boxes[..., 3]
+        
+        # gt_box: [1, 4] -> [KA, 4]
+        gt_box = np.array(gt_box).reshape(-1, 4)
+        gt_box = np.repeat(gt_box, anchors.shape[0], axis=0)
+        gt_box_ = np.zeros_like(gt_box)
+        gt_box_[..., :2] = gt_box[..., :2] - gt_box[..., 2:] * 0.5  # x1y1
+        gt_box_[..., 2:] = gt_box[..., :2] + gt_box[..., 2:] * 0.5  # x2y2
+        gt_box_area = np.prod(gt_box[..., 2:] - gt_box[..., :2], axis=1)
+
+        # intersection
+        inter_w = np.minimum(anchors[:, 2], gt_box_[:, 2]) - \
+                  np.maximum(anchors[:, 0], gt_box_[:, 0])
+        inter_h = np.minimum(anchors[:, 3], gt_box_[:, 3]) - \
+                  np.maximum(anchors[:, 1], gt_box_[:, 1])
+        inter_area = inter_w * inter_h
+        
+        # union
+        union_area = anchors_area + gt_box_area - inter_area
+
+        # iou
+        iou = inter_area / union_area
+        iou = np.clip(iou, a_min=1e-10, a_max=1.0)
+        
+        return iou
+
+
+    @torch.no_grad()
+    def __call__(self, fmp_size, stride, targets):
+        """
+            fmp_size: (List[int]) [H, W], size of the output feature map.
+            stride: (Int) stride of the YOLOv2 output.
+            targets: (Dict) dict{'boxes': [...], 
+                                 'labels': [...], 
+                                 'orig_size': ...}
+        """
+        # prepare
+        bs = len(targets)
+        fmp_h, fmp_w = fmp_size
+        gt_objectness = np.zeros([bs, fmp_h, fmp_w, self.num_anchors, 1]) 
+        gt_classes = np.zeros([bs, fmp_h, fmp_w, self.num_anchors, self.num_classes]) 
+        gt_bboxes = np.zeros([bs, fmp_h, fmp_w, self.num_anchors, 4])
+
+        for batch_index in range(bs):
+            targets_per_image = targets[batch_index]
+            # [N,]
+            tgt_cls = targets_per_image["labels"].numpy()
+            # [N, 4]
+            tgt_box = targets_per_image['boxes'].numpy()
+
+            for gt_box, gt_label in zip(tgt_box, tgt_cls):
+                x1, y1, x2, y2 = gt_box
+                # xyxy -> cxcywh
+                xc, yc = (x2 + x1) * 0.5, (y2 + y1) * 0.5
+                bw, bh = x2 - x1, y2 - y1
+                gt_box = [0, 0, bw, bh]
+
+                # check
+                if bw < 1. or bh < 1.:
+                    continue    
+
+                # compute IoU
+                iou = self.compute_iou(self.anchor_boxes, gt_box)
+                iou_mask = (iou > self.iou_thresh)
+
+                label_assignment_results = []
+                if iou_mask.sum() == 0:
+                    # Assign the anchor box with the highest IoU score.
+                    iou_ind = np.argmax(iou)
+                    anchor_idx = iou_ind
+                    # compute the grid cell
+                    xc_s = xc / stride
+                    yc_s = yc / stride
+                    grid_x = int(xc_s)
+                    grid_y = int(yc_s)
+
+                    label_assignment_results.append([grid_x, grid_y, anchor_idx])
+                else:            
+                    for iou_ind, iou_m in enumerate(iou_mask):
+                        if iou_m:
+                            anchor_idx = iou_ind
+                            # compute the grid cell
+                            xc_s = xc / stride
+                            yc_s = yc / stride
+                            grid_x = int(xc_s)
+                            grid_y = int(yc_s)
+
+                            label_assignment_results.append([grid_x, grid_y, anchor_idx])
+
+                # label assignment
+                for result in label_assignment_results:
+                    grid_x, grid_y, anchor_idx = result
+                    if grid_x < fmp_w and grid_y < fmp_h:
+                        # obj
+                        gt_objectness[batch_index, grid_y, grid_x, anchor_idx] = 1.0
+                        # cls
+                        cls_one_hot = np.zeros(self.num_classes)
+                        cls_one_hot[int(gt_label)] = 1.0
+                        gt_classes[batch_index, grid_y, grid_x, anchor_idx] = cls_one_hot
+                        # box
+                        gt_bboxes[batch_index, grid_y, grid_x, anchor_idx] = np.array([x1, y1, x2, y2])
+
+        # [B, H, W, A, C] -> [B, HWA, C]
+        gt_objectness = gt_objectness.reshape(bs, -1, 1)
+        gt_classes = gt_classes.reshape(bs, -1, self.num_classes)
+        gt_bboxes = gt_bboxes.reshape(bs, -1, 4)
+
+        # to tensor
+        gt_objectness = torch.from_numpy(gt_objectness).float()
+        gt_classes = torch.from_numpy(gt_classes).float()
+        gt_bboxes = torch.from_numpy(gt_bboxes).float()
+
+        return gt_objectness, gt_classes, gt_bboxes
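
Because both the anchors and the ground-truth box are built as `[0, 0, w, h]`, `compute_iou` is effectively a shape-only IoU. A standalone sketch with the config's anchor sizes and a hypothetical 60x80 ground-truth box:

```python
import numpy as np

anchors = np.array([[17, 25], [55, 75], [92, 206], [202, 21], [289, 311]], dtype=float)
gt_wh   = np.array([60., 80.])

# With every box centered at the origin, the intersection reduces
# to min(w1, w2) * min(h1, h2).
inter = np.minimum(anchors[:, 0], gt_wh[0]) * np.minimum(anchors[:, 1], gt_wh[1])
union = anchors.prod(axis=1) + gt_wh.prod() - inter
iou   = inter / union
print(iou.round(3))   # [0.089 0.859 0.253 0.162 0.053]
print(iou > 0.5)      # only the 55x75 anchor clears iou_thresh=0.5
```

When no anchor clears the threshold, the matcher falls back to the single argmax anchor, so every ground-truth box receives at least one positive sample.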

+ 146 - 0
models/yolov2/yolov2.py

@@ -0,0 +1,146 @@
+# --------------- Torch components ---------------
+import torch
+import torch.nn as nn
+
+# --------------- Model components ---------------
+from .yolov2_backbone import Yolov2Backbone
+from .yolov2_neck     import SPPF
+from .yolov2_head     import Yolov2DetHead
+from .yolov2_pred     import Yolov2DetPredLayer
+
+# --------------- External components ---------------
+from utils.misc import multiclass_nms
+
+
+# YOLOv2
+class Yolov2(nn.Module):
+    def __init__(self,
+                 cfg,
+                 is_val = False,
+                 ) -> None:
+        super(Yolov2, self).__init__()
+        # ---------------------- Basic setting ----------------------
+        self.cfg = cfg
+        self.num_classes = cfg.num_classes
+        ## Post-process parameters
+        self.topk_candidates  = cfg.val_topk        if is_val else cfg.test_topk
+        self.conf_thresh      = cfg.val_conf_thresh if is_val else cfg.test_conf_thresh
+        self.nms_thresh       = cfg.val_nms_thresh  if is_val else cfg.test_nms_thresh
+        self.no_multi_labels  = False if is_val else True
+        
+        # ---------------------- Network Parameters ----------------------
+        self.backbone = Yolov2Backbone(cfg)
+        self.neck     = SPPF(cfg, self.backbone.feat_dim, cfg.head_dim)
+        self.head     = Yolov2DetHead(cfg, self.neck.out_dim)
+        self.pred     = Yolov2DetPredLayer(cfg, self.num_classes)
+
+    def post_process(self, obj_preds, cls_preds, box_preds):
+        """
+        We process predictions at each scale hierarchically
+        Input:
+            obj_preds: List[torch.Tensor] -> [[B, M, 1], ...], B=1
+            cls_preds: List[torch.Tensor] -> [[B, M, C], ...], B=1
+            box_preds: List[torch.Tensor] -> [[B, M, 4], ...], B=1
+        Output:
+            bboxes: np.array -> [N, 4]
+            scores: np.array -> [N,]
+            labels: np.array -> [N,]
+        """
+        all_scores = []
+        all_labels = []
+        all_bboxes = []
+        
+        for obj_pred_i, cls_pred_i, box_pred_i in zip(obj_preds, cls_preds, box_preds):
+            obj_pred_i = obj_pred_i[0]
+            cls_pred_i = cls_pred_i[0]
+            box_pred_i = box_pred_i[0]
+            if self.no_multi_labels:
+                # [M,]
+                scores, labels = torch.max(
+                    torch.sqrt(obj_pred_i.sigmoid() * cls_pred_i.sigmoid()), dim=1)
+
+                # Keep only the top-k scoring indices.
+                num_topk = min(self.topk_candidates, box_pred_i.size(0))
+
+                # topk candidates
+                predicted_prob, topk_idxs = scores.sort(descending=True)
+                topk_scores = predicted_prob[:num_topk]
+                topk_idxs = topk_idxs[:num_topk]
+
+                # filter out the proposals with low confidence score
+                keep_idxs = topk_scores > self.conf_thresh
+                scores = topk_scores[keep_idxs]
+                topk_idxs = topk_idxs[keep_idxs]
+
+                labels = labels[topk_idxs]
+                bboxes = box_pred_i[topk_idxs]
+            else:
+                # [M, C] -> [MC,]
+                scores_i = torch.sqrt(obj_pred_i.sigmoid() * cls_pred_i.sigmoid()).flatten()
+
+                # Keep only the top-k scoring indices.
+                num_topk = min(self.topk_candidates, box_pred_i.size(0))
+
+                # torch.sort is actually faster than .topk (at least on GPUs)
+                predicted_prob, topk_idxs = scores_i.sort(descending=True)
+                topk_scores = predicted_prob[:num_topk]
+                topk_idxs = topk_idxs[:num_topk]
+
+                # filter out the proposals with low confidence score
+                keep_idxs = topk_scores > self.conf_thresh
+                scores = topk_scores[keep_idxs]
+                topk_idxs = topk_idxs[keep_idxs]
+
+                anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
+                labels = topk_idxs % self.num_classes
+
+                bboxes = box_pred_i[anchor_idxs]
+
+            all_scores.append(scores)
+            all_labels.append(labels)
+            all_bboxes.append(bboxes)
+
+        scores = torch.cat(all_scores, dim=0)
+        labels = torch.cat(all_labels, dim=0)
+        bboxes = torch.cat(all_bboxes, dim=0)
+
+        # to cpu & numpy
+        scores = scores.cpu().numpy()
+        labels = labels.cpu().numpy()
+        bboxes = bboxes.cpu().numpy()
+
+        # nms
+        scores, labels, bboxes = multiclass_nms(
+            scores, labels, bboxes, self.nms_thresh, self.num_classes)
+        
+        return bboxes, scores, labels
+    
+    def forward(self, x):
+        # ---------------- Backbone ----------------
+        x = self.backbone(x)
+
+        # ---------------- Neck ----------------
+        x = self.neck(x)
+
+        # ---------------- Heads ----------------
+        cls_feats, reg_feats = self.head(x)
+
+        # ---------------- Preds ----------------
+        outputs = self.pred(cls_feats, reg_feats)
+        outputs['image_size'] = [x.shape[2], x.shape[3]]
+
+        if not self.training:
+            all_obj_preds = [outputs['pred_obj'],]
+            all_cls_preds = [outputs['pred_cls'],]
+            all_box_preds = [outputs['pred_box'],]
+
+            # post process
+            bboxes, scores, labels = self.post_process(
+                all_obj_preds, all_cls_preds, all_box_preds)
+            outputs = {
+                "scores": scores,
+                "labels": labels,
+                "bboxes": bboxes
+            }
+        
+        return outputs 
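
The final detection score is the geometric mean of the objectness and class probabilities, followed by top-k selection and a confidence cut. A sketch of the single-label branch on hypothetical tensors (M = 6 predictions, C = 3 classes):

```python
import torch

obj_pred = torch.randn(6, 1)   # objectness logits
cls_pred = torch.randn(6, 3)   # class logits
box_pred = torch.rand(6, 4)    # decoded boxes

# Geometric mean of objectness and class probability, as in post_process.
scores, labels = torch.max(torch.sqrt(obj_pred.sigmoid() * cls_pred.sigmoid()), dim=1)

# Top-k, then drop low-confidence candidates (test_conf_thresh = 0.2).
num_topk = min(4, box_pred.size(0))
predicted_prob, topk_idxs = scores.sort(descending=True)
keep = predicted_prob[:num_topk] > 0.2
kept = topk_idxs[:num_topk][keep]
print(box_pred[kept], labels[kept])
```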

+ 209 - 0
models/yolov2/yolov2_backbone.py

@@ -0,0 +1,209 @@
+import torch
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+
+try:
+    from .yolov2_basic import conv1x1, BasicBlock, Bottleneck
+except ImportError:
+    from yolov2_basic import conv1x1, BasicBlock, Bottleneck
+
+__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
+           'resnet152']
+
+
+model_urls = {
+    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
+    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
+    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
+    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
+    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
+}
+
+
+# --------------------- Yolov2's Backbone -----------------------
+class Yolov2Backbone(nn.Module):
+    def __init__(self, cfg):
+        super().__init__()
+        self.backbone, self.feat_dim = build_resnet(cfg.backbone, cfg.use_pretrained)
+
+    def forward(self, x):
+        c5 = self.backbone(x)
+
+        return c5
+
+
+# --------------------- ResNet -----------------------
+class ResNet(nn.Module):
+
+    def __init__(self, block, layers, zero_init_residual=False):
+        super(ResNet, self).__init__()
+        self.inplanes = 64
+        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
+                               bias=False)
+        self.bn1 = nn.BatchNorm2d(64)
+        self.relu = nn.ReLU(inplace=True)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.layer1 = self._make_layer(block, 64, layers[0])
+        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+        # Zero-initialize the last BN in each residual branch,
+        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
+        if zero_init_residual:
+            for m in self.modules():
+                if isinstance(m, Bottleneck):
+                    nn.init.constant_(m.bn3.weight, 0)
+                elif isinstance(m, BasicBlock):
+                    nn.init.constant_(m.bn2.weight, 0)
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                conv1x1(self.inplanes, planes * block.expansion, stride),
+                nn.BatchNorm2d(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample))
+        self.inplanes = planes * block.expansion
+        for _ in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        """
+        Input:
+            x: (Tensor) -> [B, C, H, W]
+        Output:
+            c5: (Tensor) -> [B, C, H/32, W/32]
+        """
+        c1 = self.conv1(x)     # [B, C, H/2, W/2]
+        c1 = self.bn1(c1)      # [B, C, H/2, W/2]
+        c1 = self.relu(c1)     # [B, C, H/2, W/2]
+        c2 = self.maxpool(c1)  # [B, C, H/4, W/4]
+
+        c2 = self.layer1(c2)   # [B, C, H/4, W/4]
+        c3 = self.layer2(c2)   # [B, C, H/8, W/8]
+        c4 = self.layer3(c3)   # [B, C, H/16, W/16]
+        c5 = self.layer4(c4)   # [B, C, H/32, W/32]
+
+        return c5
+
+
+# --------------------- Functions -----------------------
+def build_resnet(model_name="resnet18", pretrained=False):
+    if model_name == 'resnet18':
+        model = resnet18(pretrained)
+        feat_dim = 512
+    elif model_name == 'resnet34':
+        model = resnet34(pretrained)
+        feat_dim = 512
+    elif model_name == 'resnet50':
+        model = resnet50(pretrained)
+        feat_dim = 2048
+    elif model_name == 'resnet101':
+        model = resnet101(pretrained)
+        feat_dim = 2048
+    else:
+        raise NotImplementedError("Unknown resnet: {}".format(model_name))
+    
+    return model, feat_dim
+
+def resnet18(pretrained=False, **kwargs):
+    """Constructs a ResNet-18 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
+    if pretrained:
+        # strict = False as we don't need fc layer params.
+        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
+    return model
+
+def resnet34(pretrained=False, **kwargs):
+    """Constructs a ResNet-34 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
+    if pretrained:
+        model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)
+    return model
+
+def resnet50(pretrained=False, **kwargs):
+    """Constructs a ResNet-50 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
+    if pretrained:
+        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
+    return model
+
+def resnet101(pretrained=False, **kwargs):
+    """Constructs a ResNet-101 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
+    if pretrained:
+        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)
+    return model
+
+def resnet152(pretrained=False, **kwargs):
+    """Constructs a ResNet-152 model.
+
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
+    if pretrained:
+        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']), strict=False)
+    return model
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # YOLOv2-Base config
+    class Yolov2BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.out_stride = 32
+            self.max_stride = 32
+            ## Backbone
+            self.backbone       = 'resnet18'
+            self.use_pretrained = True
+
+    cfg = Yolov2BaseConfig()
+    # Build backbone
+    model = Yolov2Backbone(cfg)
+
+    # Inference
+    x = torch.randn(1, 3, 640, 640)
+    t0 = time.time()
+    output = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print(output.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))    

+ 147 - 0
models/yolov2/yolov2_basic.py

@@ -0,0 +1,147 @@
+import torch
+import torch.nn as nn
+from typing import List
+
+
+# --------------------- Basic modules ---------------------
+def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
+    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
+
+    return conv
+
+def get_activation(act_type=None):
+    if act_type == 'relu':
+        return nn.ReLU(inplace=True)
+    elif act_type == 'lrelu':
+        return nn.LeakyReLU(0.1, inplace=True)
+    elif act_type == 'mish':
+        return nn.Mish(inplace=True)
+    elif act_type == 'silu':
+        return nn.SiLU(inplace=True)
+    elif act_type is None:
+        return nn.Identity()
+    else:
+        raise NotImplementedError
+        
+def get_norm(norm_type, dim):
+    if norm_type == 'BN':
+        return nn.BatchNorm2d(dim)
+    elif norm_type == 'GN':
+        return nn.GroupNorm(num_groups=32, num_channels=dim)
+    elif norm_type is None:
+        return nn.Identity()
+    else:
+        raise NotImplementedError
+
+class BasicConv(nn.Module):
+    def __init__(self, 
+                 in_dim,                   # in channels
+                 out_dim,                  # out channels 
+                 kernel_size=1,            # kernel size 
+                 padding=0,                # padding
+                 stride=1,                 # stride
+                 dilation=1,               # dilation
+                 act_type  :str = 'lrelu', # activation
+                 norm_type :str = 'BN',    # normalization
+                 depthwise :bool = False
+                ):
+        super(BasicConv, self).__init__()
+        self.depthwise = depthwise
+        if not depthwise:
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
+            self.norm = get_norm(norm_type, out_dim)
+        else:
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
+            self.norm1 = get_norm(norm_type, in_dim)
+            self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
+            self.norm2 = get_norm(norm_type, out_dim)
+        self.act  = get_activation(act_type)
+
+    def forward(self, x):
+        if not self.depthwise:
+            return self.act(self.norm(self.conv(x)))
+        else:
+            # Depthwise conv
+            x = self.norm1(self.conv1(x))
+            # Pointwise conv
+            x = self.norm2(self.conv2(x))
+            return self.act(x)  # apply the activation, matching the non-depthwise branch
+
+
+# --------------------- ResNet modules ---------------------
+def conv3x3(in_planes, out_planes, stride=1):
+    """3x3 convolution with padding"""
+    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
+                     padding=1, bias=False)
+
+def conv1x1(in_planes, out_planes, stride=1):
+    """1x1 convolution"""
+    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(BasicBlock, self).__init__()
+        self.conv1 = conv3x3(inplanes, planes, stride)
+        self.bn1 = nn.BatchNorm2d(planes)
+        self.relu = nn.ReLU(inplace=True)
+        self.conv2 = conv3x3(planes, planes)
+        self.bn2 = nn.BatchNorm2d(planes)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
+
+class Bottleneck(nn.Module):
+    expansion = 4
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None):
+        super(Bottleneck, self).__init__()
+        self.conv1 = conv1x1(inplanes, planes)
+        self.bn1 = nn.BatchNorm2d(planes)
+        self.conv2 = conv3x3(planes, planes, stride)
+        self.bn2 = nn.BatchNorm2d(planes)
+        self.conv3 = conv1x1(planes, planes * self.expansion)
+        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
+        self.relu = nn.ReLU(inplace=True)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+        out = self.relu(out)
+
+        out = self.conv3(out)
+        out = self.bn3(out)
+
+        if self.downsample is not None:
+            identity = self.downsample(x)
+
+        out += identity
+        out = self.relu(out)
+
+        return out
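
With `depthwise=True`, `BasicConv` replaces the dense 3x3 convolution with a depthwise 3x3 plus a pointwise 1x1. A quick parameter count, assuming 256 -> 256 channels:

```python
import torch.nn as nn

dense = nn.Conv2d(256, 256, 3, padding=1, bias=False)
dw    = nn.Sequential(
    nn.Conv2d(256, 256, 3, padding=1, groups=256, bias=False),  # depthwise
    nn.Conv2d(256, 256, 1, bias=False),                         # pointwise
)

count = lambda m: sum(p.numel() for p in m.parameters())
print(count(dense), count(dw))   # 589824 vs 67840, roughly 8.7x fewer
```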

+ 121 - 0
models/yolov2/yolov2_head.py

@@ -0,0 +1,121 @@
+import torch
+import torch.nn as nn
+
+try:
+    from .yolov2_basic import BasicConv
+except ImportError:
+    from yolov2_basic import BasicConv
+
+
+class Yolov2DetHead(nn.Module):
+    def __init__(self, cfg, in_dim: int = 256):
+        super().__init__()
+        # --------- Basic Parameters ----------
+        self.in_dim = in_dim
+        self.cls_head_dim = cfg.head_dim
+        self.reg_head_dim = cfg.head_dim
+        self.num_cls_head = cfg.num_cls_head
+        self.num_reg_head = cfg.num_reg_head
+        self.act_type     = cfg.head_act
+        self.norm_type    = cfg.head_norm
+        self.depthwise    = cfg.head_depthwise
+        
+        # --------- Network Parameters ----------
+        ## cls head
+        cls_feats = []
+        for i in range(self.num_cls_head):
+            if i == 0:
+                cls_feats.append(
+                    BasicConv(in_dim, self.cls_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type  = self.act_type,
+                              norm_type = self.norm_type,
+                              depthwise = self.depthwise)
+                              )
+            else:
+                cls_feats.append(
+                    BasicConv(self.cls_head_dim, self.cls_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type  = self.act_type,
+                              norm_type = self.norm_type,
+                              depthwise = self.depthwise)
+                              )
+        ## reg head
+        reg_feats = []
+        for i in range(self.num_reg_head):
+            if i == 0:
+                reg_feats.append(
+                    BasicConv(in_dim, self.reg_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type  = self.act_type,
+                              norm_type = self.norm_type,
+                              depthwise = self.depthwise)
+                              )
+            else:
+                reg_feats.append(
+                    BasicConv(self.reg_head_dim, self.reg_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type  = self.act_type,
+                              norm_type = self.norm_type,
+                              depthwise = self.depthwise)
+                              )
+        self.cls_feats = nn.Sequential(*cls_feats)
+        self.reg_feats = nn.Sequential(*reg_feats)
+
+        self.init_weights()
+        
+    def init_weights(self):
+        """Initialize the parameters."""
+        for m in self.modules():
+            if isinstance(m, torch.nn.Conv2d):
+                # In order to be consistent with the source code,
+                # reset the Conv2d initialization parameters
+                m.reset_parameters()
+
+    def forward(self, x):
+        """
+            in_feats: (Tensor) [B, C, H, W]
+        """
+        cls_feats = self.cls_feats(x)
+        reg_feats = self.reg_feats(x)
+
+        return cls_feats, reg_feats
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv2-Base config
+    class Yolov2BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.out_stride = 32
+            self.max_stride = 32
+            ## Head
+            self.head_act  = 'lrelu'
+            self.head_norm = 'BN'
+            self.head_depthwise = False
+            self.head_dim  = 256
+            self.num_cls_head   = 2
+            self.num_reg_head   = 2
+
+    cfg = Yolov2BaseConfig()
+    # Build a head
+    head = Yolov2DetHead(cfg, 512)
+
+
+    # Inference
+    x = torch.randn(1, 512, 20, 20)
+    t0 = time.time()
+    cls_feat, reg_feat = head(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print(cls_feat.shape, reg_feat.shape)
+
+    print('==============================')
+    flops, params = profile(head, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))    

+ 33 - 0
models/yolov2/yolov2_neck.py

@@ -0,0 +1,33 @@
+import torch
+import torch.nn as nn
+
+from .yolov2_basic import BasicConv
+
+
+# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
+class SPPF(nn.Module):
+    """
+        This code referenced to https://github.com/ultralytics/yolov5
+    """
+    def __init__(self, cfg, in_dim, out_dim):
+        super().__init__()
+        ## ----------- Basic Parameters -----------
+        inter_dim = round(in_dim * cfg.neck_expand_ratio)
+        self.out_dim = out_dim
+        ## ----------- Network Parameters -----------
+        self.cv1 = BasicConv(in_dim, inter_dim,
+                             kernel_size=1, padding=0, stride=1,
+                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
+        self.cv2 = BasicConv(inter_dim * 4, out_dim,
+                             kernel_size=1, padding=0, stride=1,
+                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
+        self.m = nn.MaxPool2d(kernel_size=cfg.spp_pooling_size,
+                              stride=1,
+                              padding=cfg.spp_pooling_size // 2)
+
+    def forward(self, x):
+        x = self.cv1(x)
+        y1 = self.m(x)
+        y2 = self.m(y1)
+
+        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
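
SPPF's three cascaded 5x5 max-pools reproduce the 5x5, 9x9, and 13x13 pools of the original SPP block at lower cost, which is why the forward pass concatenates `(x, y1, y2, m(y2))`. A quick numerical check of that equivalence (shapes are hypothetical):

```python
import torch
import torch.nn as nn

x  = torch.randn(1, 8, 20, 20)
m5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
y1, y2, y3 = m5(x), m5(m5(x)), m5(m5(m5(x)))

# Two and three stacked 5x5 max-pools equal single 9x9 and 13x13 pools.
m9  = nn.MaxPool2d(kernel_size=9,  stride=1, padding=4)
m13 = nn.MaxPool2d(kernel_size=13, stride=1, padding=6)
print(torch.equal(y2, m9(x)), torch.equal(y3, m13(x)))   # True True
```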

+ 102 - 0
models/yolov2/yolov2_pred.py

@@ -0,0 +1,102 @@
+import torch
+import torch.nn as nn
+
+
+# -------------------- Detection Pred Layer --------------------
+## Single-level pred layer
+class Yolov2DetPredLayer(nn.Module):
+    def __init__(self, cfg, num_classes):
+        super().__init__()
+        # --------- Basic Parameters ----------
+        self.stride  = cfg.out_stride
+        self.cls_dim = cfg.head_dim
+        self.reg_dim = cfg.head_dim
+        self.num_classes = cfg.num_classes
+        # ------------------- Anchor box -------------------
+        self.anchor_size = torch.as_tensor(cfg.anchor_sizes).float().view(-1, 2) # [A, 2]
+        self.num_anchors = self.anchor_size.shape[0]
+
+        # --------- Network Parameters ----------
+        self.obj_pred = nn.Conv2d(self.cls_dim, 1 * self.num_anchors, kernel_size=1)
+        self.cls_pred = nn.Conv2d(self.cls_dim, num_classes * self.num_anchors, kernel_size=1)
+        self.reg_pred = nn.Conv2d(self.reg_dim, 4 * self.num_anchors, kernel_size=1)                
+
+        self.init_bias()
+        
+    def init_bias(self):
+        # Init bias
+        init_prob = 0.01
+        bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
+        # obj pred
+        b = self.obj_pred.bias.view(1, -1)
+        b.data.fill_(bias_value.item())
+        self.obj_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+        # cls pred
+        b = self.cls_pred.bias.view(1, -1)
+        b.data.fill_(bias_value.item())
+        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+        # reg pred
+        b = self.reg_pred.bias.view(-1, )
+        b.data.fill_(1.0)
+        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+    def generate_anchors(self, fmp_size):
+        """
+            fmp_size: (List) [H, W]
+        """
+        # height and width of the feature map
+        fmp_h, fmp_w = fmp_size
+
+        # generate the x and y coordinates of the grid
+        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
+
+        # stack the x and y coordinates: [H, W, 2] -> [HW, 2]
+        anchor_xy = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
+        # [HW, 2] -> [HW, A, 2] -> [M, 2], M=HWA
+        anchor_xy = anchor_xy.unsqueeze(1).repeat(1, self.num_anchors, 1)
+        anchor_xy = anchor_xy.view(-1, 2)
+
+        # [A, 2] -> [1, A, 2] -> [HW, A, 2] -> [M, 2], M=HWA
+        anchor_wh = self.anchor_size.unsqueeze(0).repeat(fmp_h*fmp_w, 1, 1)
+        anchor_wh = anchor_wh.view(-1, 2)
+
+        anchors = torch.cat([anchor_xy, anchor_wh], dim=-1)
+
+        return anchors
+        
+    def forward(self, cls_feat, reg_feat):
+        # prediction layers
+        obj_pred = self.obj_pred(cls_feat)
+        cls_pred = self.cls_pred(cls_feat)
+        reg_pred = self.reg_pred(reg_feat)
+
+        # generate grid coordinates
+        B, _, H, W = cls_pred.size()
+        fmp_size = [H, W]
+        anchors = self.generate_anchors(fmp_size)
+        anchors = anchors.to(cls_pred.device)
+
+        # reshape the predictions for easier downstream processing
+        # [B, C*A, H, W] -> [B, H, W, C*A] -> [B, H*W*A, C]
+        obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 1)
+        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
+        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4)
+        
+        # decode the bounding box coordinates
+        cxcy_pred = (torch.sigmoid(reg_pred[..., :2]) + anchors[..., :2]) * self.stride
+        bwbh_pred = torch.exp(reg_pred[..., 2:]) * anchors[..., 2:]
+        pred_x1y1 = cxcy_pred - bwbh_pred * 0.5
+        pred_x2y2 = cxcy_pred + bwbh_pred * 0.5
+        box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
+
+        # output dict
+        outputs = {"pred_obj": obj_pred,       # (torch.Tensor) [B, M, 1]
+                   "pred_cls": cls_pred,       # (torch.Tensor) [B, M, C]
+                   "pred_reg": reg_pred,       # (torch.Tensor) [B, M, 4]
+                   "pred_box": box_pred,       # (torch.Tensor) [B, M, 4]
+                   "anchors" : anchors,        # (torch.Tensor) [M, 2]
+                   "fmp_size": fmp_size,
+                   "stride"  : self.stride,    # (Int)
+                   }
+
+        return outputs
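
The decoding step maps raw regressions to pixel boxes: the sigmoid keeps the predicted center inside its grid cell and the exponential scales the anchor's width and height. A worked example for one hypothetical prediction at grid cell (3, 5) with the 55x75 anchor and stride 32:

```python
import torch

stride = 32
anchor = torch.tensor([3., 5., 55., 75.])     # [grid_x, grid_y, anchor_w, anchor_h]
reg    = torch.tensor([0.2, -0.4, 0.1, 0.3])  # raw [tx, ty, tw, th]

# Same decoding as Yolov2DetPredLayer.forward.
cxcy = (torch.sigmoid(reg[:2]) + anchor[:2]) * stride   # center, in pixels
bwbh = torch.exp(reg[2:]) * anchor[2:]                  # width/height, in pixels
box  = torch.cat([cxcy - bwbh * 0.5, cxcy + bwbh * 0.5])
print(box)   # [x1, y1, x2, y2]
```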