yjh0410 2 years ago
Parent
Commit
a0db6b97b7

+ 4 - 3
config/__init__.py

@@ -1,5 +1,6 @@
 # ------------------ Model Config ----------------------
 from .yolov1_config import yolov1_cfg
+from .yolov2_config import yolov2_cfg
 
 
 def build_model_config(args):
@@ -8,9 +9,9 @@ def build_model_config(args):
     # YOLOv1
     if args.model == 'yolov1':
         cfg = yolov1_cfg
-    # # YOLOv2
-    # elif args.model == 'yolov2':
-    #     cfg = yolov2_cfg
+    # YOLOv2
+    elif args.model == 'yolov2':
+        cfg = yolov2_cfg
     # # YOLOv3
     # elif args.model == 'yolov3':
     #     cfg = yolov3_cfg

+ 44 - 0
config/yolov2_config.py

@@ -0,0 +1,44 @@
+# YOLOv2 Config
+
+yolov2_cfg = {
+    # input
+    'trans_type': 'ssd',
+    # model
+    'backbone': 'darknet19',
+    'pretrained': True,
+    'stride': 32,  # P5
+    # neck
+    'neck': 'sppf',
+    'expand_ratio': 0.5,
+    'pooling_size': 5,
+    'neck_act': 'lrelu',
+    'neck_norm': 'BN',
+    'neck_depthwise': False,
+    # head
+    'head': 'decoupled_head',
+    'head_act': 'silu',
+    'head_norm': 'BN',
+    'num_cls_head': 2,
+    'num_reg_head': 2,
+    'head_depthwise': False,
+    # loss weight
+    'loss_obj_weight': 1.0,
+    'loss_cls_weight': 1.0,
+    'loss_box_weight': 5.0,
+    # training configuration
+    'no_aug_epoch': -1,
+    # optimizer
+    'optimizer': 'sgd',        # optional: sgd, adam, adamw
+    'momentum': 0.937,         # SGD: 0.937;    AdamW: invalid
+    'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+    'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+    # model EMA
+    'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+    'ema_tau': 2000,
+    # lr schedule
+    'scheduler': 'linear',
+    'lr0': 0.01,               # SGD: 0.01;     AdamW: 0.004
+    'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.05
+    'warmup_momentum': 0.8,
+    'warmup_bias_lr': 0.1,
+}
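
For reference, a minimal sketch of how this config is selected at runtime (it assumes an argparse-style `args` namespace; `build_model_config` is the helper from config/__init__.py above):

```python
# Minimal sketch, assuming the repo layout above; SimpleNamespace stands in
# for the real argparse namespace used by the training script.
from types import SimpleNamespace
from config import build_model_config

args = SimpleNamespace(model='yolov2')
cfg = build_model_config(args)
print(cfg['backbone'], cfg['stride'], cfg['lr0'])  # darknet19 32 0.01
```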

+ 2 - 2
engine.py

@@ -179,7 +179,7 @@ def val_one_epoch(args,
         if evaluator is None:
             print('No evaluator ... save model and go on training.')
             print('Saving state, epoch: {}'.format(epoch + 1))
-            weight_name = '{}_epoch_{}.pth'.format(args.version, epoch + 1)
+            weight_name = '{}_epoch_{}.pth'.format(args.model, epoch + 1)
             checkpoint_path = os.path.join(path_to_save, weight_name)
             torch.save({'model': model.state_dict(),
                         'mAP': -1.,
@@ -203,7 +203,7 @@ def val_one_epoch(args,
                 best_map = cur_map
                 # save model
                 print('Saving state, epoch:', epoch + 1)
-                weight_name = '{}_epoch_{}_{:.2f}.pth'.format(args.version, epoch + 1, best_map*100)
+                weight_name = '{}_epoch_{}_{:.2f}.pth'.format(args.model, epoch + 1, best_map*100)
                 checkpoint_path = os.path.join(path_to_save, weight_name)
                 torch.save({'model': model.state_dict(),
                             'mAP': round(best_map*100, 1),
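
For illustration, the checkpoint names the two branches above now produce after the `args.version` -> `args.model` rename (values hypothetical):

```python
# Hypothetical values: args.model == 'yolov2', epoch index 9, best mAP 0.4213.
print('{}_epoch_{}.pth'.format('yolov2', 9 + 1))                       # yolov2_epoch_10.pth
print('{}_epoch_{}_{:.2f}.pth'.format('yolov2', 9 + 1, 0.4213 * 100))  # yolov2_epoch_10_42.13.pth
```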

+ 4 - 4
models/yolov1/yolov1.py

@@ -31,18 +31,18 @@ class YOLOv1(nn.Module):
 self.stride = 32                               # max stride of the network
         
         # ------------------- Network Structure -------------------
-        ## backbone: resnet18
+## backbone network
         self.backbone, feat_dim = build_backbone(
             cfg['backbone'], trainable&cfg['pretrained'])
 
-        ## neck: SPP
+## neck network
         self.neck = build_neck(cfg, feat_dim, out_dim=256)
         head_dim = self.neck.out_dim
 
-        ## head
+## detection head
         self.head = build_head(cfg, head_dim, head_dim, num_classes)
 
-        ## pred
+## prediction layers
         self.obj_pred = nn.Conv2d(head_dim, 1, kernel_size=1)
         self.cls_pred = nn.Conv2d(head_dim, num_classes, kernel_size=1)
         self.reg_pred = nn.Conv2d(head_dim, 4, kernel_size=1)

+ 31 - 0
models/yolov2/build.py

@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+# -*- coding:utf-8 -*-
+
+from .loss import build_criterion
+from .yolov2 import YOLOv2
+
+
+# build object detector
+def build_yolov2(args, cfg, device, num_classes=80, trainable=False):
+    print('==============================')
+    print('Build {} ...'.format(args.model.upper()))
+    
+    print('==============================')
+    print('Model Configuration: \n', cfg)
+    
+    model = YOLOv2(
+        cfg = cfg,
+        device = device,
+        img_size = args.img_size,
+        num_classes = num_classes,
+        conf_thresh = args.conf_thresh,
+        nms_thresh = args.nms_thresh,
+        trainable = trainable
+        )
+
+    criterion = None
+    if trainable:
+        # build criterion for training
+        criterion = build_criterion(cfg, device, num_classes)
+
+    return model, criterion
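
A hedged usage sketch for `build_yolov2` (the `args` fields shown here mirror the repo's CLI flags and are assumptions, not part of this commit):

```python
# Hypothetical usage sketch; SimpleNamespace stands in for the argparse args.
import torch
from types import SimpleNamespace
from config import build_model_config
from models.yolov2.build import build_yolov2

args = SimpleNamespace(model='yolov2', img_size=416,
                       conf_thresh=0.001, nms_thresh=0.5)
cfg = build_model_config(args)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# trainable=True also returns the criterion built from loss.py
model, criterion = build_yolov2(args, cfg, device,
                                num_classes=20, trainable=True)
model = model.to(device).train()
```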

+ 113 - 0
models/yolov2/loss.py

@@ -0,0 +1,113 @@
+import torch
+import torch.nn.functional as F
+from .matcher import YoloMatcher
+from utils.box_ops import get_ious
+from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
+
+
+class Criterion(object):
+    def __init__(self, cfg, device, num_classes=80):
+        self.cfg = cfg
+        self.device = device
+        self.num_classes = num_classes
+        self.loss_obj_weight = cfg['loss_obj_weight']
+        self.loss_cls_weight = cfg['loss_cls_weight']
+        self.loss_box_weight = cfg['loss_box_weight']
+
+        # matcher
+        self.matcher = YoloMatcher(num_classes=num_classes)
+
+
+    def loss_objectness(self, pred_obj, gt_obj):
+        loss_obj = F.binary_cross_entropy_with_logits(pred_obj, gt_obj, reduction='none')
+
+        return loss_obj
+    
+
+    def loss_classes(self, pred_cls, gt_label):
+        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_label, reduction='none')
+
+        return loss_cls
+
+
+    def loss_bboxes(self, pred_box, gt_box):
+        # regression loss
+        ious = get_ious(pred_box,
+                        gt_box,
+                        box_mode="xyxy",
+                        iou_type='giou')
+        loss_box = 1.0 - ious
+
+        return loss_box
+
+
+    def __call__(self, outputs, targets):
+        device = outputs['pred_cls'][0].device
+        stride = outputs['stride']
+        fmp_size = outputs['fmp_size']
+        (
+            gt_objectness, 
+            gt_classes, 
+            gt_bboxes,
+            ) = self.matcher(fmp_size=fmp_size, 
+                             stride=stride, 
+                             targets=targets)
+        # [B, M, C] -> [BM, C]
+        pred_obj = outputs['pred_obj'].view(-1)                     # [BM,]
+        pred_cls = outputs['pred_cls'].view(-1, self.num_classes)   # [BM, C]
+        pred_box = outputs['pred_box'].view(-1, 4)                  # [BM, 4]
+       
+        gt_objectness = gt_objectness.view(-1).to(device).float()               # [BM,]
+        gt_classes = gt_classes.view(-1, self.num_classes).to(device).float()   # [BM, C]
+        gt_bboxes = gt_bboxes.view(-1, 4).to(device).float()                    # [BM, 4]
+
+        pos_masks = (gt_objectness > 0)
+        num_fgs = pos_masks.sum()
+
+        if is_dist_avail_and_initialized():
+            torch.distributed.all_reduce(num_fgs)
+        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
+
+        # obj loss
+        loss_obj = self.loss_objectness(pred_obj, gt_objectness)
+        loss_obj = loss_obj.sum() / num_fgs
+
+        # cls loss
+        pred_cls_pos = pred_cls[pos_masks]
+        gt_classes_pos = gt_classes[pos_masks]
+        loss_cls = self.loss_classes(pred_cls_pos, gt_classes_pos)
+        loss_cls = loss_cls.sum() / num_fgs
+
+        # box loss
+        pred_box_pos = pred_box[pos_masks]
+        gt_bboxes_pos = gt_bboxes[pos_masks]
+        loss_box = self.loss_bboxes(pred_box_pos, gt_bboxes_pos)
+        loss_box = loss_box.sum() / num_fgs
+        
+        # total loss
+        losses = self.loss_obj_weight * loss_obj + \
+                 self.loss_cls_weight * loss_cls + \
+                 self.loss_box_weight * loss_box
+
+        loss_dict = dict(
+                loss_obj = loss_obj,
+                loss_cls = loss_cls,
+                loss_box = loss_box,
+                losses = losses
+        )
+
+        return loss_dict
+    
+
+def build_criterion(cfg, device, num_classes):
+    criterion = Criterion(
+        cfg=cfg,
+        device=device,
+        num_classes=num_classes
+        )
+
+    return criterion
+
+    
+if __name__ == "__main__":
+    pass
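
For a sanity check, a hedged sketch of driving the criterion with random inputs; the tensor shapes follow the `outputs` dict assembled in yolov2.py (all values hypothetical):

```python
# Hypothetical smoke test for the criterion (B=2, 13x13 grid, stride 32, 20 classes).
import torch
from models.yolov2.loss import build_criterion

cfg = {'loss_obj_weight': 1.0, 'loss_cls_weight': 1.0, 'loss_box_weight': 5.0}
criterion = build_criterion(cfg, device=torch.device('cpu'), num_classes=20)

B, H, W = 2, 13, 13
xy1 = torch.rand(B, H * W, 2) * 200          # random but valid boxes: x1y1 < x2y2
wh = torch.rand(B, H * W, 2) * 100 + 1.0
outputs = {
    'pred_obj': torch.randn(B, H * W, 1),
    'pred_cls': torch.randn(B, H * W, 20),
    'pred_box': torch.cat([xy1, xy1 + wh], dim=-1),
    'stride': 32,
    'fmp_size': [H, W],
}
targets = [{'boxes': torch.tensor([[50., 60., 200., 220.]]),
            'labels': torch.tensor([3])} for _ in range(B)]

loss_dict = criterion(outputs, targets)
print({k: round(float(v), 3) for k, v in loss_dict.items()})
```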

+ 64 - 0
models/yolov2/matcher.py

@@ -0,0 +1,64 @@
+import torch
+import numpy as np
+
+
+class YoloMatcher(object):
+    def __init__(self, num_classes):
+        self.num_classes = num_classes
+
+
+    @torch.no_grad()
+    def __call__(self, fmp_size, stride, targets):
+        """
+            img_size: (Int) input image size
+            stride: (Int) -> stride of YOLOv1 output.
+            targets: (Dict) dict{'boxes': [...], 
+                                 'labels': [...], 
+                                 'orig_size': ...}
+        """
+        # prepare
+        bs = len(targets)
+        fmp_h, fmp_w = fmp_size
+        gt_objectness = np.zeros([bs, fmp_h, fmp_w, 1]) 
+        gt_classes = np.zeros([bs, fmp_h, fmp_w, self.num_classes]) 
+        gt_bboxes = np.zeros([bs, fmp_h, fmp_w, 4])
+
+        for batch_index in range(bs):
+            targets_per_image = targets[batch_index]
+            # [N,]
+            tgt_cls = targets_per_image["labels"].numpy()
+            # [N, 4]
+            tgt_box = targets_per_image['boxes'].numpy()
+
+            for gt_box, gt_label in zip(tgt_box, tgt_cls):
+                x1, y1, x2, y2 = gt_box
+                # xyxy -> cxcywh
+                xc, yc = (x2 + x1) * 0.5, (y2 + y1) * 0.5
+                bw, bh = x2 - x1, y2 - y1
+
+                # skip invalid (degenerate) boxes rather than aborting the whole batch
+                if bw < 1. or bh < 1.:
+                    continue
+
+                # grid
+                xs_c = xc / stride
+                ys_c = yc / stride
+                grid_x = int(xs_c)
+                grid_y = int(ys_c)
+
+                if grid_x < fmp_w and grid_y < fmp_h:
+                    gt_objectness[batch_index, grid_y, grid_x] = 1.0
+                    gt_classes[batch_index, grid_y, grid_x, int(gt_label)] = 1.0
+                    gt_bboxes[batch_index, grid_y, grid_x] = np.array([x1, y1, x2, y2])
+
+        # [B, M, C]
+        gt_objectness = gt_objectness.reshape(bs, -1, 1)
+        gt_classes = gt_classes.reshape(bs, -1, self.num_classes)
+        gt_bboxes = gt_bboxes.reshape(bs, -1, 4)
+
+        # to tensor
+        gt_objectness = torch.from_numpy(gt_objectness).float()
+        gt_classes = torch.from_numpy(gt_classes).float()
+        gt_bboxes = torch.from_numpy(gt_bboxes).float()
+
+        return gt_objectness, gt_classes, gt_bboxes
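
A small sketch of the assignment rule: the one grid cell containing each box center becomes positive (toy 4x4 grid, stride 32; values hypothetical):

```python
# Hypothetical check of YoloMatcher on a 4x4 grid with stride 32.
import torch
from models.yolov2.matcher import YoloMatcher

matcher = YoloMatcher(num_classes=20)
targets = [{'boxes': torch.tensor([[10., 10., 70., 70.]]),   # center (40, 40)
            'labels': torch.tensor([5])}]

gt_obj, gt_cls, gt_box = matcher(fmp_size=(4, 4), stride=32, targets=targets)
print(gt_obj.shape, gt_cls.shape, gt_box.shape)  # [1, 16, 1] [1, 16, 20] [1, 16, 4]
# center (40, 40) / stride 32 -> grid cell (1, 1) -> flattened index 1*4 + 1 = 5
print(gt_obj[0, 5, 0].item())  # 1.0
```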

+ 209 - 0
models/yolov2/yolov2.py

@@ -0,0 +1,209 @@
+import torch
+import torch.nn as nn
+import numpy as np
+
+from utils.nms import multiclass_nms
+
+from .yolov2_backbone import build_backbone
+from .yolov2_neck import build_neck
+from .yolov2_head import build_head
+
+
+# YOLOv2
+class YOLOv2(nn.Module):
+    def __init__(self,
+                 cfg,
+                 device,
+                 img_size=None,
+                 num_classes=20,
+                 conf_thresh=0.01,
+                 nms_thresh=0.5,
+                 trainable=False):
+        super(YOLOv2, self).__init__()
+        # ------------------- Basic parameters -------------------
+        self.cfg = cfg                                 # model config
+        self.img_size = img_size                       # input image size
+        self.device = device                           # cuda or cpu
+        self.num_classes = num_classes                 # number of classes
+        self.trainable = trainable                     # training flag
+        self.conf_thresh = conf_thresh                 # score threshold
+        self.nms_thresh = nms_thresh                   # NMS threshold
+        self.stride = 32                               # max stride of the network
+        
+        # ------------------- Network Structure -------------------
+        ## backbone network
+        self.backbone, feat_dim = build_backbone(
+            cfg['backbone'], trainable&cfg['pretrained'])
+
+        ## neck network
+        self.neck = build_neck(cfg, feat_dim, out_dim=256)
+        head_dim = self.neck.out_dim
+
+        ## detection head
+        self.head = build_head(cfg, head_dim, head_dim, num_classes)
+
+        ## prediction layers
+        self.obj_pred = nn.Conv2d(head_dim, 1, kernel_size=1)
+        self.cls_pred = nn.Conv2d(head_dim, num_classes, kernel_size=1)
+        self.reg_pred = nn.Conv2d(head_dim, 4, kernel_size=1)
+    
+
+        if self.trainable:
+            self.init_bias()
+
+
+    def init_bias(self):
+        # init bias
+        init_prob = 0.01
+        bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
+        nn.init.constant_(self.obj_pred.bias, bias_value)
+        nn.init.constant_(self.cls_pred.bias, bias_value)
+
+
+    def create_grid(self, fmp_size):
+        """ 
+            用于生成G矩阵,其中每个元素都是特征图上的像素坐标。
+        """
+        # 特征图的宽和高
+        ws, hs = fmp_size
+
+        # 生成网格的x坐标和y坐标
+        grid_y, grid_x = torch.meshgrid([torch.arange(hs), torch.arange(ws)])
+
+        # 将xy两部分的坐标拼起来:[H, W, 2]
+        grid_xy = torch.stack([grid_x, grid_y], dim=-1).float()
+
+        # [H, W, 2] -> [HW, 2] -> [HW, 2]
+        grid_xy = grid_xy.view(-1, 2).to(self.device)
+        
+        return grid_xy
+
+
+    def decode_boxes(self, pred, fmp_size):
+        """
+            将txtytwth转换为常用的x1y1x2y2形式。
+        """
+        # 生成网格坐标矩阵
+        grid_cell = self.create_grid(fmp_size)
+
+        # 计算预测边界框的中心点坐标和宽高
+        pred_ctr = (torch.sigmoid(pred[..., :2]) + grid_cell) * self.stride
+        pred_wh = torch.exp(pred[..., 2:]) * self.stride
+
+        # 将所有bbox的中心带你坐标和宽高换算成x1y1x2y2形式
+        pred_x1y1 = pred_ctr - pred_wh * 0.5
+        pred_x2y2 = pred_ctr + pred_wh * 0.5
+        pred_box = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
+
+        return pred_box
+
+
+    def postprocess(self, bboxes, scores):
+        """
+        Input:
+            bboxes: [HxW, 4]
+            scores: [HxW, num_classes]
+        Output:
+            bboxes: [N, 4]
+            score:  [N,]
+            labels: [N,]
+        """
+
+        labels = np.argmax(scores, axis=1)
+        scores = scores[(np.arange(scores.shape[0]), labels)]
+        
+        # threshold
+        keep = np.where(scores >= self.conf_thresh)
+        bboxes = bboxes[keep]
+        scores = scores[keep]
+        labels = labels[keep]
+
+        # nms
+        scores, labels, bboxes = multiclass_nms(
+            scores, labels, bboxes, self.nms_thresh, self.num_classes, False)
+
+        return bboxes, scores, labels
+
+
+    @torch.no_grad()
+    def inference(self, x):
+        # backbone network
+        feat = self.backbone(x)
+
+        # neck network
+        feat = self.neck(feat)
+
+        # detection head
+        cls_feat, reg_feat = self.head(feat)
+
+        # prediction layers
+        obj_pred = self.obj_pred(cls_feat)
+        cls_pred = self.cls_pred(cls_feat)
+        reg_pred = self.reg_pred(reg_feat)
+        fmp_size = obj_pred.shape[-2:]
+
+        # reshape the predictions to make subsequent processing easier
+        # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]
+        obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+
+        # At test time, the batch size is assumed to be 1,
+        # so the batch dimension is removed by taking index [0].
+        obj_pred = obj_pred[0]       # [H*W, 1]
+        cls_pred = cls_pred[0]       # [H*W, NC]
+        reg_pred = reg_pred[0]       # [H*W, 4]
+
+        # score of each bounding box
+        scores = torch.sqrt(obj_pred.sigmoid() * cls_pred.sigmoid())
+        
+        # decode the bounding boxes: [H*W, 4]
+        bboxes = self.decode_boxes(reg_pred, fmp_size)
+        
+        # move the predictions to the CPU for post-processing
+        scores = scores.cpu().numpy()
+        bboxes = bboxes.cpu().numpy()
+        
+        # post-processing
+        bboxes, scores, labels = self.postprocess(bboxes, scores)
+
+        return bboxes, scores, labels
+
+
+    def forward(self, x):
+        if not self.trainable:
+            return self.inference(x)
+        else:
+            # backbone network
+            feat = self.backbone(x)
+
+            # neck network
+            feat = self.neck(feat)
+
+            # detection head
+            cls_feat, reg_feat = self.head(feat)
+
+            # prediction layers
+            obj_pred = self.obj_pred(cls_feat)
+            cls_pred = self.cls_pred(cls_feat)
+            reg_pred = self.reg_pred(reg_feat)
+            fmp_size = obj_pred.shape[-2:]
+
+            # reshape the predictions to make subsequent processing easier
+            # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]
+            obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+            cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+            reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+
+            # decode bbox
+            box_pred = self.decode_boxes(reg_pred, fmp_size)
+
+            # network outputs
+            outputs = {"pred_obj": obj_pred,      # (Tensor) [B, M, 1]
+                       "pred_cls": cls_pred,      # (Tensor) [B, M, C]
+                       "pred_box": box_pred,      # (Tensor) [B, M, 4]
+                       "stride": self.stride,     # (Int)
+                       "fmp_size": fmp_size       # (List) [fmp_h, fmp_w]
+                       }
+            return outputs
+        
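A shape-level sketch of the two forward paths: training returns the raw outputs dict, eval returns post-processed arrays. This is a sketch, assuming `yolov2_cfg` from the config above with `pretrained=False` to skip the weight download:

```python
# Hypothetical shape check for both forward paths.
import torch
from config import yolov2_cfg
from models.yolov2.yolov2 import YOLOv2

cfg = dict(yolov2_cfg, pretrained=False)
model = YOLOv2(cfg, device=torch.device('cpu'), img_size=416,
               num_classes=20, trainable=True)

x = torch.randn(1, 3, 416, 416)
outputs = model(x)                       # training path
print(outputs['pred_obj'].shape)         # [1, 169, 1]  (13*13 cells at stride 32)

model.trainable = False
model.eval()
bboxes, scores, labels = model(x)        # inference path (numpy arrays after NMS)
```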

+ 127 - 0
models/yolov2/yolov2_backbone.py

@@ -0,0 +1,127 @@
+import torch
+import torch.nn as nn
+import os
+
+model_urls = {
+    "darknet19": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/darknet19.pth",
+}
+
+
+__all__ = ['darknet19']
+
+
+class Conv_BN_LeakyReLU(nn.Module):
+    def __init__(self, in_channels, out_channels, ksize, padding=0, stride=1, dilation=1):
+        super(Conv_BN_LeakyReLU, self).__init__()
+        self.convs = nn.Sequential(
+            nn.Conv2d(in_channels, out_channels, ksize, padding=padding, stride=stride, dilation=dilation),
+            nn.BatchNorm2d(out_channels),
+            nn.LeakyReLU(0.1, inplace=True)
+        )
+
+    def forward(self, x):
+        return self.convs(x)
+
+
+class DarkNet19(nn.Module):
+    def __init__(self):
+        
+        super(DarkNet19, self).__init__()
+        # backbone network : DarkNet-19
+        # output : stride = 2, c = 32
+        self.conv_1 = nn.Sequential(
+            Conv_BN_LeakyReLU(3, 32, 3, 1),
+            nn.MaxPool2d((2,2), 2),
+        )
+
+        # output : stride = 4, c = 64
+        self.conv_2 = nn.Sequential(
+            Conv_BN_LeakyReLU(32, 64, 3, 1),
+            nn.MaxPool2d((2,2), 2)
+        )
+
+        # output : stride = 8, c = 128
+        self.conv_3 = nn.Sequential(
+            Conv_BN_LeakyReLU(64, 128, 3, 1),
+            Conv_BN_LeakyReLU(128, 64, 1),
+            Conv_BN_LeakyReLU(64, 128, 3, 1),
+            nn.MaxPool2d((2,2), 2)
+        )
+
+        # output : stride = 8, c = 256
+        self.conv_4 = nn.Sequential(
+            Conv_BN_LeakyReLU(128, 256, 3, 1),
+            Conv_BN_LeakyReLU(256, 128, 1),
+            Conv_BN_LeakyReLU(128, 256, 3, 1),
+        )
+
+        # output : stride = 16, c = 512
+        self.maxpool_4 = nn.MaxPool2d((2, 2), 2)
+        self.conv_5 = nn.Sequential(
+            Conv_BN_LeakyReLU(256, 512, 3, 1),
+            Conv_BN_LeakyReLU(512, 256, 1),
+            Conv_BN_LeakyReLU(256, 512, 3, 1),
+            Conv_BN_LeakyReLU(512, 256, 1),
+            Conv_BN_LeakyReLU(256, 512, 3, 1),
+        )
+        
+        # output : stride = 32, c = 1024
+        self.maxpool_5 = nn.MaxPool2d((2, 2), 2)
+        self.conv_6 = nn.Sequential(
+            Conv_BN_LeakyReLU(512, 1024, 3, 1),
+            Conv_BN_LeakyReLU(1024, 512, 1),
+            Conv_BN_LeakyReLU(512, 1024, 3, 1),
+            Conv_BN_LeakyReLU(1024, 512, 1),
+            Conv_BN_LeakyReLU(512, 1024, 3, 1)
+        )
+
+
+    def forward(self, x):
+        c1 = self.conv_1(x)                    # c1
+        c2 = self.conv_2(c1)                   # c2
+        c3 = self.conv_3(c2)                   # c3
+        c3 = self.conv_4(c3)                   # c3
+        c4 = self.conv_5(self.maxpool_4(c3))   # c4
+        c5 = self.conv_6(self.maxpool_5(c4))   # c5
+
+        return c5
+
+
+def build_darknet19(pretrained=False):
+    # model
+    model = DarkNet19()
+    feat_dim = 1024
+
+    # load weight
+    if pretrained:
+        print('Loading pretrained weight ...')
+        url = model_urls['darknet19']
+        # checkpoint state dict
+        checkpoint_state_dict = torch.hub.load_state_dict_from_url(
+            url=url, map_location="cpu", check_hash=True)
+        # model state dict
+        model_state_dict = model.state_dict()
+        # check
+        for k in list(checkpoint_state_dict.keys()):
+            if k in model_state_dict:
+                shape_model = tuple(model_state_dict[k].shape)
+                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
+                if shape_model != shape_checkpoint:
+                    checkpoint_state_dict.pop(k)
+            else:
+                checkpoint_state_dict.pop(k)
+                print('Dropped checkpoint key: ', k)
+
+        model.load_state_dict(checkpoint_state_dict)
+
+    return model, feat_dim
+
+
+if __name__ == '__main__':
+    import time
+    model, feat_dim = build_darknet19(pretrained=True)
+    x = torch.randn(1, 3, 224, 224)
+    t0 = time.time()
+    y = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)

+ 77 - 0
models/yolov2/yolov2_basic.py

@@ -0,0 +1,77 @@
+import torch
+import torch.nn as nn
+
+
+class SiLU(nn.Module):
+    """export-friendly version of nn.SiLU()"""
+
+    @staticmethod
+    def forward(x):
+        return x * torch.sigmoid(x)
+
+
+def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
+    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
+
+    return conv
+
+
+def get_activation(act_type=None):
+    if act_type == 'relu':
+        return nn.ReLU(inplace=True)
+    elif act_type == 'lrelu':
+        return nn.LeakyReLU(0.1, inplace=True)
+    elif act_type == 'mish':
+        return nn.Mish(inplace=True)
+    elif act_type == 'silu':
+        return nn.SiLU(inplace=True)
+
+
+def get_norm(norm_type, dim):
+    if norm_type == 'BN':
+        return nn.BatchNorm2d(dim)
+    elif norm_type == 'GN':
+        return nn.GroupNorm(num_groups=32, num_channels=dim)
+
+
+# Basic conv layer
+class Conv(nn.Module):
+    def __init__(self, 
+                 c1,                   # in channels
+                 c2,                   # out channels 
+                 k=1,                  # kernel size 
+                 p=0,                  # padding
+                 s=1,                  # stride
+                 d=1,                  # dilation
+                 act_type='lrelu',     # activation
+                 norm_type='BN',       # normalization
+                 depthwise=False):
+        super(Conv, self).__init__()
+        convs = []
+        add_bias = False if norm_type else True
+        if depthwise:
+            # depthwise conv
+            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
+            if norm_type:
+                convs.append(get_norm(norm_type, c1))
+            if act_type:
+                convs.append(get_activation(act_type))
+            # pointwise conv
+            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
+            if norm_type:
+                convs.append(get_norm(norm_type, c2))
+            if act_type:
+                convs.append(get_activation(act_type))
+
+        else:
+            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
+            if norm_type:
+                convs.append(get_norm(norm_type, c2))
+            if act_type:
+                convs.append(get_activation(act_type))
+            
+        self.convs = nn.Sequential(*convs)
+
+
+    def forward(self, x):
+        return self.convs(x)
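
A brief sketch of the Conv block's behavior (assumes the module path above; with k=3 and p=1 the spatial size is preserved, and `depthwise=True` factorizes the conv into a depthwise 3x3 followed by a pointwise 1x1):

```python
# Hypothetical usage of the Conv block defined above.
import torch
from models.yolov2.yolov2_basic import Conv

conv = Conv(c1=64, c2=128, k=3, p=1, s=1, act_type='silu', norm_type='BN')
x = torch.randn(1, 64, 32, 32)
print(conv(x).shape)   # torch.Size([1, 128, 32, 32])

# depthwise=True: 3x3 depthwise conv + 1x1 pointwise conv, same output shape
dw = Conv(c1=64, c2=128, k=3, p=1, s=1, depthwise=True)
print(dw(x).shape)     # torch.Size([1, 128, 32, 32])
```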

+ 137 - 0
models/yolov2/yolov2_head.py

@@ -0,0 +1,137 @@
+import torch
+import torch.nn as nn
+try:
+    from .yolov2_basic import Conv
+except:
+    from yolov2_basic import Conv
+
+
+class DecoupledHead(nn.Module):
+    def __init__(self, cfg, in_dim, out_dim, num_classes=80):
+        super().__init__()
+        print('==============================')
+        print('Head: Decoupled Head')
+        self.in_dim = in_dim
+        self.num_cls_head=cfg['num_cls_head']
+        self.num_reg_head=cfg['num_reg_head']
+        self.act_type=cfg['head_act']
+        self.norm_type=cfg['head_norm']
+
+        # cls head
+        cls_feats = []
+        self.cls_out_dim = max(out_dim, num_classes)
+        for i in range(cfg['num_cls_head']):
+            if i == 0:
+                cls_feats.append(
+                    Conv(in_dim, self.cls_out_dim, k=3, p=1, s=1, 
+                        act_type=self.act_type,
+                        norm_type=self.norm_type,
+                        depthwise=cfg['head_depthwise'])
+                        )
+            else:
+                cls_feats.append(
+                    Conv(self.cls_out_dim, self.cls_out_dim, k=3, p=1, s=1, 
+                        act_type=self.act_type,
+                        norm_type=self.norm_type,
+                        depthwise=cfg['head_depthwise'])
+                        )
+                
+        # reg head
+        reg_feats = []
+        self.reg_out_dim = max(out_dim, 64)
+        for i in range(cfg['num_reg_head']):
+            if i == 0:
+                reg_feats.append(
+                    Conv(in_dim, self.reg_out_dim, k=3, p=1, s=1, 
+                        act_type=self.act_type,
+                        norm_type=self.norm_type,
+                        depthwise=cfg['head_depthwise'])
+                        )
+            else:
+                reg_feats.append(
+                    Conv(self.reg_out_dim, self.reg_out_dim, k=3, p=1, s=1, 
+                        act_type=self.act_type,
+                        norm_type=self.norm_type,
+                        depthwise=cfg['head_depthwise'])
+                        )
+
+        self.cls_feats = nn.Sequential(*cls_feats)
+        self.reg_feats = nn.Sequential(*reg_feats)
+
+
+    def forward(self, x):
+        """
+            in_feats: (Tensor) [B, C, H, W]
+        """
+        cls_feats = self.cls_feats(x)
+        reg_feats = self.reg_feats(x)
+
+        return cls_feats, reg_feats
+    
+
+# build detection head
+def build_head(cfg, in_dim, out_dim, num_classes=80):
+    head = DecoupledHead(cfg, in_dim, out_dim, num_classes) 
+
+    return head
+
+
+if __name__ == '__main__':
+    import time
+    from thop import profile
+    cfg = {
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'head_depthwise': False,
+        'reg_max': 16,
+    }
+    fpn_dims = [256, 512, 512]
+    # Head-1
+    model = build_head(cfg, 256, fpn_dims[0], num_classes=80)
+    x = torch.randn(1, 256, 80, 80)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-1: Params : {:.2f} M'.format(params / 1e6))
+
+    # Head-2
+    model = build_head(cfg, 512, fpn_dims[1], num_classes=80)
+    x = torch.randn(1, 512, 40, 40)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-2: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-2: Params : {:.2f} M'.format(params / 1e6))
+
+    # Head-3
+    model = build_head(cfg, 512, fpn_dims[2], num_classes=80)
+    x = torch.randn(1, 512, 20, 20)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-3: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-3: Params : {:.2f} M'.format(params / 1e6))

+ 40 - 0
models/yolov2/yolov2_neck.py

@@ -0,0 +1,40 @@
+import torch
+import torch.nn as nn
+from .yolov2_basic import Conv
+
+
+# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
+class SPPF(nn.Module):
+    def __init__(self, in_dim, out_dim, expand_ratio=0.5, pooling_size=5, act_type='lrelu', norm_type='BN'):
+        super().__init__()
+        inter_dim = int(in_dim * expand_ratio)
+        self.out_dim = out_dim
+        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.m = nn.MaxPool2d(kernel_size=pooling_size, stride=1, padding=pooling_size // 2)
+
+    def forward(self, x):
+        x = self.cv1(x)
+        y1 = self.m(x)
+        y2 = self.m(y1)
+
+        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
+
+
+def build_neck(cfg, in_dim, out_dim):
+    model = cfg['neck']
+    print('==============================')
+    print('Neck: {}'.format(model))
+    # build neck
+    if model == 'sppf':
+        neck = SPPF(
+            in_dim=in_dim,
+            out_dim=out_dim,
+            expand_ratio=cfg['expand_ratio'], 
+            pooling_size=cfg['pooling_size'],
+            act_type=cfg['neck_act'],
+            norm_type=cfg['neck_norm']
+            )
+
+    return neck
+
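A quick shape check for the SPPF neck (a sketch, assuming the module path above): SPPF preserves spatial size while mapping `in_dim` channels to `out_dim`.

```python
# Hypothetical shape check for the SPPF neck above.
import torch
from models.yolov2.yolov2_neck import build_neck

cfg = {'neck': 'sppf', 'expand_ratio': 0.5, 'pooling_size': 5,
       'neck_act': 'lrelu', 'neck_norm': 'BN'}
neck = build_neck(cfg, in_dim=1024, out_dim=256)

x = torch.randn(1, 1024, 13, 13)    # C5 feature from darknet19 at stride 32
print(neck(x).shape)                # torch.Size([1, 256, 13, 13])
```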