
Keep training YOLOv5-L from epoch 221

yjh0410 · 2 years ago
parent commit e507306115

+ 7 - 3
config/__init__.py

@@ -3,6 +3,7 @@ from .yolov1_config import yolov1_cfg
 from .yolov2_config import yolov2_cfg
 from .yolov3_config import yolov3_cfg
 from .yolov4_config import yolov4_cfg
+from .yolov5_config import yolov5_cfg
 from .yolov7_config import yolov7_cfg
 from .yolov8_config import yolov8_cfg
 from .yolox_config import yolox_cfg
@@ -23,11 +24,14 @@ def build_model_config(args):
     # YOLOv4
     elif args.model == 'yolov4':
         cfg = yolov4_cfg
+    # YOLOv5
+    elif args.model in ['yolov5_n', 'yolov5_s', 'yolov5_m', 'yolov5_l', 'yolov5_x']:
+        cfg = yolov5_cfg[args.model]
     # YOLOv7
-    elif args.model in ['yolov7_nano', 'yolov7_tiny', 'yolov7_large', 'yolov7_huge']:
+    elif args.model in ['yolov7_t', 'yolov7_l', 'yolov7_x']:
         cfg = yolov7_cfg[args.model]
     # YOLOv8
-    elif args.model in ['yolov8_nano', 'yolov8_small', 'yolov8_medium', 'yolov8_large', 'yolov8_huge']:
+    elif args.model in ['yolov8_n', 'yolov8_s', 'yolov8_m', 'yolov8_l', 'yolov8_x']:
         cfg = yolov8_cfg[args.model]
     # YOLOX
     elif args.model == 'yolox':
@@ -57,4 +61,4 @@ def build_trans_config(trans_config='ssd'):
     elif trans_config == 'yolov5_nano':
         cfg = yolov5_nano_trans_config
         
-    return cfg
+    return cfg
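For reference, a minimal sketch of how the new registry entry is reached — assuming the package is importable as config from the repo root and that args only needs a model attribute here (SimpleNamespace stands in for the real argparse namespace):

# Hypothetical usage of the updated dispatch.
from types import SimpleNamespace
from config import build_model_config

args = SimpleNamespace(model='yolov5_l')
cfg = build_model_config(args)       # -> yolov5_cfg['yolov5_l']
print(cfg['width'], cfg['depth'])    # 1.0 1.0 for the L variant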

+ 304 - 0
config/yolov5_config.py

@@ -0,0 +1,304 @@
+# YOLOv5 Config
+
+yolov5_cfg = {
+    'yolov5_n':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 0.25,
+        'depth': 0.34,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
+                        [30, 61],   [62, 45],   [59, 119],    # P4
+                        [116, 90],  [156, 198], [373, 326]],  # P5
+        # ---------------- Train config ----------------
+        ## input
+        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'trans_type': 'yolov5_weak',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'anchor_thresh': 4.0,
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## close strong augmentation
+        'no_aug_epoch': 10,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, AdamW
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+    'yolov5_s':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 0.50,
+        'depth': 0.34,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
+                        [30, 61],   [62, 45],   [59, 119],    # P4
+                        [116, 90],  [156, 198], [373, 326]],  # P5
+        # ---------------- Train config ----------------
+        ## input
+        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'trans_type': 'yolov5_weak',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'anchor_thresh': 4.0,
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## close strong augmentation
+        'no_aug_epoch': 10,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, AdamW
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+    'yolov5_m':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 0.75,
+        'depth': 0.67,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
+                        [30, 61],   [62, 45],   [59, 119],    # P4
+                        [116, 90],  [156, 198], [373, 326]],  # P5
+        # ---------------- Train config ----------------
+        ## input
+        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'trans_type': 'yolov5_strong',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'anchor_thresh': 4.0,
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## close strong augmentation
+        'no_aug_epoch': 10,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, AdamW
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+    'yolov5_l':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 1.0,
+        'depth': 1.0,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
+                        [30, 61],   [62, 45],   [59, 119],    # P4
+                        [116, 90],  [156, 198], [373, 326]],  # P5
+        # ---------------- Train config ----------------
+        ## input
+        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'trans_type': 'yolov5_strong',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'anchor_thresh': 4.0,
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## close strong augmentation
+        'no_aug_epoch': 10,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, AdamW
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+    'yolov5_x':{
+        # ---------------- Model config ----------------
+        ## Backbone
+        'backbone': 'cspdarknet',
+        'pretrained': True,
+        'bk_act': 'silu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'width': 1.25,
+        'depth': 1.34,
+        'stride': [8, 16, 32],  # P3, P4, P5
+        ## FPN
+        'fpn': 'yolov5_pafpn',
+        'fpn_reduce_layer': 'Conv',
+        'fpn_downsample_layer': 'Conv',
+        'fpn_core_block': 'CSPBlock',
+        'fpn_act': 'silu',
+        'fpn_norm': 'BN',
+        'fpn_depthwise': False,
+        ## Head
+        'head': 'decoupled_head',
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_depthwise': False,
+        'anchor_size': [[10, 13],   [16, 30],   [33, 23],     # P3
+                        [30, 61],   [62, 45],   [59, 119],    # P4
+                        [116, 90],  [156, 198], [373, 326]],  # P5
+        # ---------------- Train config ----------------
+        ## input
+        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'trans_type': 'yolov5_strong',
+        # ---------------- Assignment config ----------------
+        ## matcher
+        'anchor_thresh': 4.0,
+        # ---------------- Loss config ----------------
+        ## loss weight
+        'loss_obj_weight': 1.0,
+        'loss_cls_weight': 1.0,
+        'loss_box_weight': 5.0,
+        # ---------------- Train config ----------------
+        ## close strong augmentation
+        'no_aug_epoch': 10,
+        ## optimizer
+        'optimizer': 'sgd',        # optional: sgd, AdamW
+        'momentum': 0.937,         # SGD: 0.937;    AdamW: None
+        'weight_decay': 5e-4,      # SGD: 5e-4;     AdamW: 5e-2
+        'clip_grad': 10,           # SGD: 10.0;     AdamW: -1
+        ## model EMA
+        'ema_decay': 0.9999,       # SGD: 0.9999;   AdamW: 0.9998
+        'ema_tau': 2000,
+        ## lr schedule
+        'scheduler': 'linear',
+        'lr0': 0.01,              # SGD: 0.01;     AdamW: 0.001
+        'lrf': 0.01,               # SGD: 0.01;     AdamW: 0.01
+        'warmup_momentum': 0.8,
+        'warmup_bias_lr': 0.1,
+    },
+
+}
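All five variants share the same anchor boxes, loss weights, and schedule; only the width/depth multipliers and the augmentation preset (yolov5_weak for n/s, yolov5_strong for m/l/x) differ. As a rough sketch, here is what those width multipliers mean for the backbone's output channels, following the feat_dims formula from yolov5_backbone.py further down:

# Sketch only: channel dims follow feat_dims = [int(256*w), int(512*w), int(1024*w)].
variants = {'yolov5_n': 0.25, 'yolov5_s': 0.50, 'yolov5_m': 0.75, 'yolov5_l': 1.0, 'yolov5_x': 1.25}
for name, w in variants.items():
    print(name, [int(256 * w), int(512 * w), int(1024 * w)])
# yolov5_n [64, 128, 256] ... yolov5_l [256, 512, 1024] ... yolov5_x [320, 640, 1280]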

+ 3 - 3
config/yolov7_config.py

@@ -1,7 +1,7 @@
 # YOLOv7 Config
 
 yolov7_cfg = {
-    'yolov7_tiny':{
+    'yolov7_t':{
         # input
         'trans_type': 'yolov5_weak',
         'multi_scale': [0.5, 1.5], # 320 -> 640
@@ -59,7 +59,7 @@ yolov7_cfg = {
         'warmup_bias_lr': 0.1,
     },
 
-    'yolov7_large':{
+    'yolov7_l':{
         # input
         'trans_type': 'yolov5_strong',
         'multi_scale': [0.5, 1.25], # 320 -> 640
@@ -117,7 +117,7 @@ yolov7_cfg = {
         'warmup_bias_lr': 0.1,
     },
 
-    'yolov7_huge':{
+    'yolov7_x':{
         # input
         'trans_type': 'yolov5_strong',
         'multi_scale': [0.5, 1.25], # 320 -> 640

+ 5 - 5
config/yolov8_config.py

@@ -1,7 +1,7 @@
 # yolov8 config
 
 yolov8_cfg = {
-    'yolov8_nano':{
+    'yolov8_n':{
         # input
         'trans_type': 'yolov5_weak',
         'multi_scale': [0.5, 1.5],   # 320 -> 960
@@ -62,7 +62,7 @@ yolov8_cfg = {
         'warmup_bias_lr': 0.1,
     },
 
-    'yolov8_small':{
+    'yolov8_s':{
         # input
         'trans_type': 'yolov5_strong',
         'multi_scale': [0.5, 1.5],   # 320 -> 960
@@ -123,7 +123,7 @@ yolov8_cfg = {
         'warmup_bias_lr': 0.1,
     },
 
-    'yolov8_medium':{
+    'yolov8_m':{
         # input
         'trans_type': 'yolov5_strong',
         'multi_scale': [0.5, 1.5],   # 320 -> 960
@@ -184,7 +184,7 @@ yolov8_cfg = {
         'warmup_bias_lr': 0.1,
     },
 
-    'yolov8_large':{
+    'yolov8_l':{
         # input
         'trans_type': 'yolov5_strong',
         'multi_scale': [0.5, 1.5],   # 320 -> 960
@@ -245,7 +245,7 @@ yolov8_cfg = {
         'warmup_bias_lr': 0.1,
     },
 
-    'yolov8_huge':{
+    'yolov8_x':{
         # input
         'trans_type': 'yolov5_strong',
         'multi_scale': [0.5, 1.5],   # 320 -> 960

+ 8 - 3
models/__init__.py

@@ -6,6 +6,7 @@ from .yolov1.build import build_yolov1
 from .yolov2.build import build_yolov2
 from .yolov3.build import build_yolov3
 from .yolov4.build import build_yolov4
+from .yolov5.build import build_yolov5
 from .yolov7.build import build_yolov7
 from .yolov8.build import build_yolov8
 from .yolox.build import build_yolox
@@ -33,12 +34,16 @@ def build_model(args,
     elif args.model == 'yolov4':
         model, criterion = build_yolov4(
             args, model_cfg, device, num_classes, trainable)
+    # YOLOv5   
+    elif args.model in ['yolov5_n', 'yolov5_s', 'yolov5_m', 'yolov5_l', 'yolov5_x']:
+        model, criterion = build_yolov5(
+            args, model_cfg, device, num_classes, trainable)
     # YOLOv7
-    elif args.model in ['yolov7_nano', 'yolov7_tiny', 'yolov7_large', 'yolov7_huge']:
+    elif args.model in ['yolov7_t', 'yolov7_l', 'yolov7_x']:
         model, criterion = build_yolov7(
             args, model_cfg, device, num_classes, trainable)
     # YOLOv8
-    elif args.model in ['yolov8_nano', 'yolov8_small', 'yolov8_medium', 'yolov8_large', 'yolov8_huge']:
+    elif args.model in ['yolov8_n', 'yolov8_s', 'yolov8_m', 'yolov8_l', 'yolov8_x']:
         model, criterion = build_yolov8(
             args, model_cfg, device, num_classes, trainable)
     # YOLOX   
@@ -80,4 +85,4 @@ def build_model(args,
         return model, criterion
 
     else:      
-        return model
+        return model

+ 63 - 0
models/yolov5/build.py

@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+# -*- coding:utf-8 -*-
+
+import torch
+import torch.nn as nn
+
+from .loss import build_criterion
+from .yolov5 import YOLOv5
+
+
+# build object detector
+def build_yolov5(args, cfg, device, num_classes=80, trainable=False):
+    print('==============================')
+    print('Build {} ...'.format(args.model.upper()))
+    
+    print('==============================')
+    print('Model Configuration: \n', cfg)
+    
+    # -------------- Build YOLO --------------
+    model = YOLOv5(
+        cfg=cfg,
+        device=device, 
+        num_classes=num_classes,
+        trainable=trainable,
+        conf_thresh=args.conf_thresh,
+        nms_thresh=args.nms_thresh,
+        topk=args.topk,
+        )
+
+    # -------------- Initialize YOLO --------------
+    for m in model.modules():
+        if isinstance(m, nn.BatchNorm2d):
+            m.eps = 1e-3
+            m.momentum = 0.03    
+    # Init bias
+    init_prob = 0.01
+    bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
+    # obj pred
+    for obj_pred in model.obj_preds:
+        b = obj_pred.bias.view(1, -1)
+        b.data.fill_(bias_value.item())
+        obj_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+    # cls pred
+    for cls_pred in model.cls_preds:
+        b = cls_pred.bias.view(1, -1)
+        b.data.fill_(bias_value.item())
+        cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+    # reg pred
+    for reg_pred in model.reg_preds:
+        b = reg_pred.bias.view(-1, )
+        b.data.fill_(1.0)
+        reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+        w = reg_pred.weight
+        w.data.fill_(0.)
+        reg_pred.weight = torch.nn.Parameter(w, requires_grad=True)
+
+
+    # -------------- Build criterion --------------
+    criterion = None
+    if trainable:
+        # build criterion for training
+        criterion = build_criterion(cfg, device, num_classes)
+    return model, criterion
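The objectness/class bias initialization above is the usual low-prior trick: with init_prob = 0.01 every logit starts out predicting roughly 1% foreground, so the early loss is not swamped by the overwhelmingly negative locations. A quick check of the constant, using plain math as a stand-in for the torch call:

import math
init_prob = 0.01
bias_value = -math.log((1.0 - init_prob) / init_prob)
print(round(bias_value, 3))   # -4.595, and sigmoid(-4.595) is roughly 0.01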

+ 114 - 0
models/yolov5/loss.py

@@ -0,0 +1,114 @@
+import torch
+import torch.nn.functional as F
+from .matcher import Yolov5Matcher
+from utils.box_ops import get_ious
+from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
+
+
+class Criterion(object):
+    def __init__(self, cfg, device, num_classes=80):
+        self.cfg = cfg
+        self.device = device
+        self.num_classes = num_classes
+        # loss weight
+        self.loss_obj_weight = cfg['loss_obj_weight']
+        self.loss_cls_weight = cfg['loss_cls_weight']
+        self.loss_box_weight = cfg['loss_box_weight']
+
+        # matcher
+        self.matcher = Yolov5Matcher(num_classes, 3, cfg['anchor_size'], cfg['anchor_thresh'])
+
+
+    def loss_objectness(self, pred_obj, gt_obj):
+        loss_obj = F.binary_cross_entropy_with_logits(pred_obj, gt_obj, reduction='none')
+
+        return loss_obj
+    
+
+    def loss_classes(self, pred_cls, gt_label):
+        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_label, reduction='none')
+
+        return loss_cls
+
+
+    def loss_bboxes(self, pred_box, gt_box):
+        # regression loss
+        ious = get_ious(pred_box,
+                        gt_box,
+                        box_mode="xyxy",
+                        iou_type='giou')
+        loss_box = 1.0 - ious
+
+        return loss_box, ious
+
+
+    def __call__(self, outputs, targets):
+        device = outputs['pred_cls'][0].device
+        fpn_strides = outputs['strides']
+        fmp_sizes = outputs['fmp_sizes']
+        (
+            gt_objectness, 
+            gt_classes, 
+            gt_bboxes,
+            ) = self.matcher(fmp_sizes=fmp_sizes, 
+                             fpn_strides=fpn_strides, 
+                             targets=targets)
+        # List[B, M, C] -> [B, M, C] -> [BM, C]
+        pred_obj = torch.cat(outputs['pred_obj'], dim=1).view(-1)                      # [BM,]
+        pred_cls = torch.cat(outputs['pred_cls'], dim=1).view(-1, self.num_classes)    # [BM, C]
+        pred_box = torch.cat(outputs['pred_box'], dim=1).view(-1, 4)                   # [BM, 4]
+       
+        gt_objectness = gt_objectness.view(-1).to(device).float()               # [BM,]
+        gt_classes = gt_classes.view(-1, self.num_classes).to(device).float()   # [BM, C]
+        gt_bboxes = gt_bboxes.view(-1, 4).to(device).float()                    # [BM, 4]
+
+        pos_masks = (gt_objectness > 0)
+        num_fgs = pos_masks.sum()
+
+        if is_dist_avail_and_initialized():
+            torch.distributed.all_reduce(num_fgs)
+        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
+
+        # box loss
+        pred_box_pos = pred_box[pos_masks]
+        gt_bboxes_pos = gt_bboxes[pos_masks]
+        loss_box, ious = self.loss_bboxes(pred_box_pos, gt_bboxes_pos)
+        loss_box = loss_box.sum() / num_fgs
+        
+        # cls loss
+        pred_cls_pos = pred_cls[pos_masks]
+        gt_classes_pos = gt_classes[pos_masks] * ious.unsqueeze(-1).clamp(0.)
+        loss_cls = self.loss_classes(pred_cls_pos, gt_classes_pos)
+        loss_cls = loss_cls.sum() / num_fgs
+
+        # obj loss
+        loss_obj = self.loss_objectness(pred_obj, gt_objectness)
+        loss_obj = loss_obj.sum() / num_fgs
+
+        # total loss
+        losses = self.loss_obj_weight * loss_obj + \
+                 self.loss_cls_weight * loss_cls + \
+                 self.loss_box_weight * loss_box
+
+        loss_dict = dict(
+                loss_obj = loss_obj,
+                loss_cls = loss_cls,
+                loss_box = loss_box,
+                losses = losses
+        )
+
+        return loss_dict
+    
+
+def build_criterion(cfg, device, num_classes):
+    criterion = Criterion(
+        cfg=cfg,
+        device=device,
+        num_classes=num_classes
+        )
+
+    return criterion
+
+    
+if __name__ == "__main__":
+    pass
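Note how the class loss is computed against a soft target: the one-hot label of each positive is scaled by its box IoU, so poorly localized positives contribute smaller classification targets. A tiny sketch of that line with invented numbers:

# Mirrors gt_classes_pos = gt_classes[pos_masks] * ious.unsqueeze(-1).clamp(0.)
import torch
gt_onehot = torch.tensor([[0., 1., 0.]])    # a 3-class example, ground-truth class 1
ious = torch.tensor([0.73])                 # IoU of the matched prediction (invented value)
soft_target = gt_onehot * ious.unsqueeze(-1).clamp(0.)
print(soft_target)                          # tensor([[0.0000, 0.7300, 0.0000]])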

+ 215 - 0
models/yolov5/matcher.py

@@ -0,0 +1,215 @@
+import numpy as np
+import torch
+
+
+class Yolov5Matcher(object):
+    def __init__(self, num_classes, num_anchors, anchor_size, anchor_threshold):
+        self.num_classes = num_classes
+        self.num_anchors = num_anchors
+        self.anchor_threshold = anchor_threshold
+        # [KA, 2]
+        self.anchor_sizes = np.array([[anchor[0], anchor[1]]
+                                      for anchor in anchor_size])
+        # [KA, 4]
+        self.anchor_boxes = np.array([[0., 0., anchor[0], anchor[1]]
+                                      for anchor in anchor_size])
+
+    def compute_iou(self, anchor_boxes, gt_box):
+        """
+            anchor_boxes : ndarray -> [KA, 4] (cx, cy, bw, bh).
+            gt_box : ndarray -> [1, 4] (cx, cy, bw, bh).
+        """
+        # anchors: [KA, 4]
+        anchors = np.zeros_like(anchor_boxes)
+        anchors[..., :2] = anchor_boxes[..., :2] - anchor_boxes[..., 2:] * 0.5  # x1y1
+        anchors[..., 2:] = anchor_boxes[..., :2] + anchor_boxes[..., 2:] * 0.5  # x2y2
+        anchors_area = anchor_boxes[..., 2] * anchor_boxes[..., 3]
+        
+        # gt_box: [1, 4] -> [KA, 4]
+        gt_box = np.array(gt_box).reshape(-1, 4)
+        gt_box = np.repeat(gt_box, anchors.shape[0], axis=0)
+        gt_box_ = np.zeros_like(gt_box)
+        gt_box_[..., :2] = gt_box[..., :2] - gt_box[..., 2:] * 0.5  # x1y1
+        gt_box_[..., 2:] = gt_box[..., :2] + gt_box[..., 2:] * 0.5  # x2y2
+        gt_box_area = gt_box[..., 2] * gt_box[..., 3]  # w * h (boxes are cxcywh)
+
+        # intersection
+        inter_w = np.minimum(anchors[:, 2], gt_box_[:, 2]) - \
+                  np.maximum(anchors[:, 0], gt_box_[:, 0])
+        inter_h = np.minimum(anchors[:, 3], gt_box_[:, 3]) - \
+                  np.maximum(anchors[:, 1], gt_box_[:, 1])
+        inter_area = inter_w * inter_h
+        
+        # union
+        union_area = anchors_area + gt_box_area - inter_area
+
+        # iou
+        iou = inter_area / union_area
+        iou = np.clip(iou, a_min=1e-10, a_max=1.0)
+        
+        return iou
+
+
+    def iou_assignment(self, ctr_points, gt_box, fpn_strides):
+        # compute IoU
+        iou = self.compute_iou(self.anchor_boxes, gt_box)
+        iou_mask = (iou > 0.5)
+
+        label_assignment_results = []
+        if iou_mask.sum() == 0:
+            # We assign the anchor box with highest IoU score.
+            iou_ind = np.argmax(iou)
+
+            level = iou_ind // self.num_anchors              # pyramid level
+            anchor_idx = iou_ind - level * self.num_anchors  # anchor index
+
+            # get the corresponding stride
+            stride = fpn_strides[level]
+
+            # compute the grid cell
+            xc, yc = ctr_points
+            xc_s = xc / stride
+            yc_s = yc / stride
+            grid_x = int(xc_s)
+            grid_y = int(yc_s)
+
+            label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
+        else:            
+            for iou_ind, iou_m in enumerate(iou_mask):
+                if iou_m:
+                    level = iou_ind // self.num_anchors              # pyramid level
+                    anchor_idx = iou_ind - level * self.num_anchors  # anchor index
+
+                    # get the corresponding stride
+                    stride = fpn_strides[level]
+
+                    # compute the grid cell
+                    xc, yc = ctr_points
+                    xc_s = xc / stride
+                    yc_s = yc / stride
+                    grid_x = int(xc_s)
+                    grid_y = int(yc_s)
+
+                    label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
+
+        return label_assignment_results
+
+
+    def aspect_ratio_assignment(self, ctr_points, keeps, fpn_strides):
+        label_assignment_results = []
+        for keep_idx, keep in enumerate(keeps):
+            if keep:
+                level = keep_idx // self.num_anchors              # pyramid level
+                anchor_idx = keep_idx - level * self.num_anchors  # anchor index
+
+                # get the corresponding stride
+                stride = fpn_strides[level]
+
+                # compute the grid cell
+                xc, yc = ctr_points
+                xc_s = xc / stride
+                yc_s = yc / stride
+                grid_x = int(xc_s)
+                grid_y = int(yc_s)
+
+                label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
+        
+        return label_assignment_results
+    
+
+    @torch.no_grad()
+    def __call__(self, fmp_sizes, fpn_strides, targets):
+        """
+            fmp_sizes: (List) [[fmp_h, fmp_w], ...], one per FPN level.
+            fpn_strides: (List) -> [8, 16, 32, ...] stride of network output.
+            targets: (List[Dict]) each dict{'boxes': [...], 
+                                 'labels': [...], 
+                                 'orig_size': ...}
+        """
+        assert len(fmp_sizes) == len(fpn_strides)
+        # prepare
+        bs = len(targets)
+        gt_objectness = [
+            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, 1]) 
+            for (fmp_h, fmp_w) in fmp_sizes
+            ]
+        gt_classes = [
+            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, self.num_classes]) 
+            for (fmp_h, fmp_w) in fmp_sizes
+            ]
+        gt_bboxes = [
+            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, 4]) 
+            for (fmp_h, fmp_w) in fmp_sizes
+            ]
+
+        for batch_index in range(bs):
+            targets_per_image = targets[batch_index]
+            # [N,]
+            tgt_cls = targets_per_image["labels"].numpy()
+            # [N, 4]
+            tgt_box = targets_per_image['boxes'].numpy()
+
+            for gt_box, gt_label in zip(tgt_box, tgt_cls):
+                # get a bbox coords
+                x1, y1, x2, y2 = gt_box.tolist()
+                # xyxy -> cxcywh
+                xc, yc = (x2 + x1) * 0.5, (y2 + y1) * 0.5
+                bw, bh = x2 - x1, y2 - y1
+                gt_box = np.array([[0., 0., bw, bh]])
+
+                # check target
+                if bw < 1. or bh < 1.:
+                    # invalid target
+                    continue
+
+                # compute aspect ratio
+                ratios = gt_box[..., 2:] / self.anchor_sizes
+                keeps = np.maximum(ratios, 1 / ratios).max(-1) < self.anchor_threshold
+
+                if keeps.sum() == 0:
+                    label_assignment_results = self.iou_assignment([xc, yc], gt_box, fpn_strides)
+                else:
+                    label_assignment_results = self.aspect_ratio_assignment([xc, yc], keeps, fpn_strides)
+
+                # label assignment
+                for result in label_assignment_results:
+                    # assignment
+                    grid_x, grid_y, xc_s, yc_s, level, anchor_idx = result
+                    stride = fpn_strides[level]
+                    fmp_h, fmp_w = fmp_sizes[level]
+                    # coord on the feature
+                    x1s, y1s = x1 / stride, y1 / stride
+                    x2s, y2s = x2 / stride, y2 / stride
+                    # offset
+                    off_x = xc_s - grid_x
+                    off_y = yc_s - grid_y
+ 
+                    if off_x <= 0.5 and off_y <= 0.5:  # top left
+                        grids = [(grid_x-1, grid_y), (grid_x, grid_y-1), (grid_x, grid_y)]
+                    elif off_x > 0.5 and off_y <= 0.5: # top right
+                        grids = [(grid_x+1, grid_y), (grid_x, grid_y-1), (grid_x, grid_y)]
+                    elif off_x <= 0.5 and off_y > 0.5: # bottom left
+                        grids = [(grid_x-1, grid_y), (grid_x, grid_y+1), (grid_x, grid_y)]
+                    elif off_x > 0.5 and off_y > 0.5:  # bottom right
+                        grids = [(grid_x+1, grid_y), (grid_x, grid_y+1), (grid_x, grid_y)]
+
+                    for (i, j) in grids:
+                        is_in_box = (j >= y1s and j < y2s) and (i >= x1s and i < x2s)
+                        is_valid = (j >= 0 and j < fmp_h) and (i >= 0 and i < fmp_w)
+
+                        if is_in_box and is_valid:
+                            # obj
+                            gt_objectness[level][batch_index, j, i, anchor_idx] = 1.0
+                            # cls
+                            cls_one_hot = torch.zeros(self.num_classes)
+                            cls_one_hot[int(gt_label)] = 1.0
+                            gt_classes[level][batch_index, j, i, anchor_idx] = cls_one_hot
+                            # box
+                            gt_bboxes[level][batch_index, j, i, anchor_idx] = torch.as_tensor([x1, y1, x2, y2])
+
+        # [B, M, C]
+        gt_objectness = torch.cat([gt.view(bs, -1, 1) for gt in gt_objectness], dim=1).float()
+        gt_classes = torch.cat([gt.view(bs, -1, self.num_classes) for gt in gt_classes], dim=1).float()
+        gt_bboxes = torch.cat([gt.view(bs, -1, 4) for gt in gt_bboxes], dim=1).float()
+
+        return gt_objectness, gt_classes, gt_bboxes
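The last branch reproduces YOLOv5's multi-positive assignment: besides the cell containing the object center, the two nearest neighbor cells (chosen from which half of the cell the center falls in) are also labeled positive, provided they stay inside both the box and the feature map. A tiny worked example of that branch with made-up coordinates:

# Center at (xc_s, yc_s) = (4.3, 7.8) on some level -> grid cell (4, 7), offsets (0.3, 0.8).
grid_x, grid_y = 4, 7
off_x, off_y = 4.3 - grid_x, 7.8 - grid_y    # 0.3, 0.8 -> the "bottom left" case
grids = [(grid_x - 1, grid_y), (grid_x, grid_y + 1), (grid_x, grid_y)]
print(grids)                                 # [(3, 7), (4, 8), (4, 7)]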

+ 246 - 0
models/yolov5/yolov5.py

@@ -0,0 +1,246 @@
+import torch
+import torch.nn as nn
+
+from .yolov5_backbone import build_backbone
+from .yolov5_pafpn import build_fpn
+from .yolov5_head import build_head
+
+from utils.nms import multiclass_nms
+
+
+class YOLOv5(nn.Module):
+    def __init__(self, 
+                 cfg,
+                 device, 
+                 num_classes = 20, 
+                 conf_thresh = 0.05,
+                 nms_thresh = 0.6,
+                 trainable = False, 
+                 topk = 1000):
+        super(YOLOv5, self).__init__()
+        # ---------------------- Basic Parameters ----------------------
+        self.cfg = cfg
+        self.device = device
+        self.stride = cfg['stride']
+        self.num_classes = num_classes
+        self.trainable = trainable
+        self.conf_thresh = conf_thresh
+        self.nms_thresh = nms_thresh
+        self.topk = topk
+        
+        # ------------------- Anchor box -------------------
+        self.num_levels = 3
+        self.num_anchors = len(cfg['anchor_size']) // self.num_levels
+        self.anchor_size = torch.as_tensor(
+            cfg['anchor_size']
+            ).view(self.num_levels, self.num_anchors, 2) # [S, A, 2]
+        
+        # ------------------- Network Structure -------------------
+        ## Backbone
+        self.backbone, feats_dim = build_backbone(cfg, trainable and cfg['pretrained'])
+        
+        ## FPN
+        self.fpn = build_fpn(cfg=cfg, in_dims=feats_dim, out_dim=round(256*cfg['width']))
+        self.head_dim = self.fpn.out_dim
+
+        ## Head
+        self.non_shared_heads = nn.ModuleList(
+            [build_head(cfg, head_dim, head_dim, num_classes) 
+            for head_dim in self.head_dim
+            ])
+
+        ## Pred
+        self.obj_preds = nn.ModuleList(
+                            [nn.Conv2d(head.reg_out_dim, 1 * self.num_anchors, kernel_size=1) 
+                                for head in self.non_shared_heads
+                              ]) 
+        self.cls_preds = nn.ModuleList(
+                            [nn.Conv2d(head.cls_out_dim, self.num_classes * self.num_anchors, kernel_size=1) 
+                                for head in self.non_shared_heads
+                              ]) 
+        self.reg_preds = nn.ModuleList(
+                            [nn.Conv2d(head.reg_out_dim, 4 * self.num_anchors, kernel_size=1) 
+                                for head in self.non_shared_heads
+                              ])                 
+
+
+    # ---------------------- Basic Functions ----------------------
+    ## generate anchor points
+    def generate_anchors(self, level, fmp_size):
+        fmp_h, fmp_w = fmp_size
+        # [KA, 2]
+        anchor_size = self.anchor_size[level]
+
+        # generate grid cells
+        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
+        anchor_xy = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
+        # [HW, 2] -> [HW, KA, 2] -> [M, 2]
+        anchor_xy = anchor_xy.unsqueeze(1).repeat(1, self.num_anchors, 1)
+        anchor_xy = anchor_xy.view(-1, 2).to(self.device)
+
+        # [KA, 2] -> [1, KA, 2] -> [HW, KA, 2] -> [M, 2]
+        anchor_wh = anchor_size.unsqueeze(0).repeat(fmp_h*fmp_w, 1, 1)
+        anchor_wh = anchor_wh.view(-1, 2).to(self.device)
+
+        anchors = torch.cat([anchor_xy, anchor_wh], dim=-1)
+
+        return anchors
+        
+    ## post-process
+    def post_process(self, obj_preds, cls_preds, box_preds):
+        """
+        Input:
+            obj_preds: List(Tensor) [[H x W x A, 1], ...]
+            cls_preds: List(Tensor) [[H x W x A, C], ...]
+            box_preds: List(Tensor) [[H x W x A, 4], ...]
+        """
+        all_scores = []
+        all_labels = []
+        all_bboxes = []
+        
+        for obj_pred_i, cls_pred_i, box_pred_i in zip(obj_preds, cls_preds, box_preds):
+            # (H x W x KA x C,)
+            scores_i = (torch.sqrt(obj_pred_i.sigmoid() * cls_pred_i.sigmoid())).flatten()
+
+            # Keep top k top scoring indices only.
+            num_topk = min(self.topk, box_pred_i.size(0))
+
+            # torch.sort is actually faster than .topk (at least on GPUs)
+            predicted_prob, topk_idxs = scores_i.sort(descending=True)
+            topk_scores = predicted_prob[:num_topk]
+            topk_idxs = topk_idxs[:num_topk]
+
+            # filter out the proposals with low confidence score
+            keep_idxs = topk_scores > self.conf_thresh
+            scores = topk_scores[keep_idxs]
+            topk_idxs = topk_idxs[keep_idxs]
+
+            anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
+            labels = topk_idxs % self.num_classes
+
+            bboxes = box_pred_i[anchor_idxs]
+
+            all_scores.append(scores)
+            all_labels.append(labels)
+            all_bboxes.append(bboxes)
+
+        scores = torch.cat(all_scores)
+        labels = torch.cat(all_labels)
+        bboxes = torch.cat(all_bboxes)
+
+        # to cpu & numpy
+        scores = scores.cpu().numpy()
+        labels = labels.cpu().numpy()
+        bboxes = bboxes.cpu().numpy()
+
+        # nms
+        scores, labels, bboxes = multiclass_nms(
+            scores, labels, bboxes, self.nms_thresh, self.num_classes, False)
+
+        return bboxes, scores, labels
+
+    # ---------------------- Main Process for Inference ----------------------
+    @torch.no_grad()
+    def inference_single_image(self, x):
+        # backbone
+        pyramid_feats = self.backbone(x)
+
+        # fpn
+        pyramid_feats = self.fpn(pyramid_feats)
+
+        # non-shared heads
+        all_anchors = []
+        all_obj_preds = []
+        all_cls_preds = []
+        all_box_preds = []
+        for level, (feat, head) in enumerate(zip(pyramid_feats, self.non_shared_heads)):
+            cls_feat, reg_feat = head(feat)
+
+            # [1, C, H, W]
+            obj_pred = self.obj_preds[level](reg_feat)
+            cls_pred = self.cls_preds[level](cls_feat)
+            reg_pred = self.reg_preds[level](reg_feat)
+
+            # anchors: [M, 4]
+            fmp_size = cls_pred.shape[-2:]
+            anchors = self.generate_anchors(level, fmp_size)
+
+            # [1, C, H, W] -> [H, W, C] -> [M, C]
+            obj_pred = obj_pred[0].permute(1, 2, 0).contiguous().view(-1, 1)
+            cls_pred = cls_pred[0].permute(1, 2, 0).contiguous().view(-1, self.num_classes)
+            reg_pred = reg_pred[0].permute(1, 2, 0).contiguous().view(-1, 4)
+
+            # decode bbox
+            ctr_pred = (torch.sigmoid(reg_pred[..., :2]) * 2.0 - 0.5 + anchors[..., :2]) * self.stride[level]
+            wh_pred = torch.exp(reg_pred[..., 2:]) * anchors[..., 2:]
+            pred_x1y1 = ctr_pred - wh_pred * 0.5
+            pred_x2y2 = ctr_pred + wh_pred * 0.5
+            box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
+
+            all_obj_preds.append(obj_pred)
+            all_cls_preds.append(cls_pred)
+            all_box_preds.append(box_pred)
+            all_anchors.append(anchors)
+
+        # post process
+        bboxes, scores, labels = self.post_process(
+            all_obj_preds, all_cls_preds, all_box_preds)
+        
+        return bboxes, scores, labels
+
+    # ---------------------- Main Process for Training ----------------------
+    def forward(self, x):
+        if not self.trainable:
+            return self.inference_single_image(x)
+        else:
+            # backbone
+            pyramid_feats = self.backbone(x)
+
+            # fpn
+            pyramid_feats = self.fpn(pyramid_feats)
+
+            # non-shared heads
+            all_fmp_sizes = []
+            all_obj_preds = []
+            all_cls_preds = []
+            all_box_preds = []
+            for level, (feat, head) in enumerate(zip(pyramid_feats, self.non_shared_heads)):
+                cls_feat, reg_feat = head(feat)
+
+                # [B, C, H, W]
+                obj_pred = self.obj_preds[level](reg_feat)
+                cls_pred = self.cls_preds[level](cls_feat)
+                reg_pred = self.reg_preds[level](reg_feat)
+
+                B, _, H, W = cls_pred.size()
+                fmp_size = [H, W]
+                # generate anchor boxes: [M, 4]
+                anchors = self.generate_anchors(level, fmp_size)
+                
+                # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
+                obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 1)
+                cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
+                reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4)
+
+                # decode bbox
+                ctr_pred = (torch.sigmoid(reg_pred[..., :2]) * 2.0 - 0.5 + anchors[..., :2]) * self.stride[level]
+                wh_pred = torch.exp(reg_pred[..., 2:]) * anchors[..., 2:]
+                pred_x1y1 = ctr_pred - wh_pred * 0.5
+                pred_x2y2 = ctr_pred + wh_pred * 0.5
+                box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
+
+                all_obj_preds.append(obj_pred)
+                all_cls_preds.append(cls_pred)
+                all_box_preds.append(box_pred)
+                all_fmp_sizes.append(fmp_size)
+            
+            # output dict
+            outputs = {"pred_obj": all_obj_preds,        # List [B, M, 1]
+                       "pred_cls": all_cls_preds,        # List [B, M, C]
+                       "pred_box": all_box_preds,        # List [B, M, 4]
+                       'fmp_sizes': all_fmp_sizes,       # List
+                       'strides': self.stride,           # List
+                       }
+
+            return outputs 
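Both the training and inference paths use the same anchor-based decoding: the center offset goes through sigmoid * 2 - 0.5 (so a prediction can reach slightly into a neighboring cell, matching the matcher's three-cell assignment) and the size is exp() times the anchor prior, all scaled back to input pixels by the level's stride. A standalone sketch of that transform with invented raw predictions:

import torch

stride = 8.0
anchor = torch.tensor([2.0, 4.5, 16.0, 30.0])    # grid x, grid y, anchor w, anchor h (invented)
reg_pred = torch.tensor([0.2, -0.3, 0.1, 0.4])   # raw tx, ty, tw, th (invented)

ctr = (torch.sigmoid(reg_pred[:2]) * 2.0 - 0.5 + anchor[:2]) * stride
wh = torch.exp(reg_pred[2:]) * anchor[2:]
box_xyxy = torch.cat([ctr - wh * 0.5, ctr + wh * 0.5])
print(box_xyxy)                                  # x1, y1, x2, y2 in input-image pixels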

+ 150 - 0
models/yolov5/yolov5_backbone.py

@@ -0,0 +1,150 @@
+import torch
+import torch.nn as nn
+
+try:
+    from .yolov5_basic import Conv, CSPBlock
+    from .yolov5_neck import SPPF
+except:
+    from yolov5_basic import Conv, CSPBlock
+    from yolov5_neck import SPPF
+
+model_urls = {
+    "cspdarknet_nano": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_nano.pth",
+    "cspdarknet_small": None,
+    "cspdarknet_small": None,
+    "cspdarknet_large": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_large.pth",
+    "cspdarknet_huge": None,
+}
+
+# CSPDarkNet
+class CSPDarkNet(nn.Module):
+    def __init__(self, depth=1.0, width=1.0, act_type='silu', norm_type='BN', depthwise=False):
+        super(CSPDarkNet, self).__init__()
+        self.feat_dims = [int(256*width), int(512*width), int(1024*width)]
+
+        # P1
+        self.layer_1 = Conv(3, int(64*width), k=6, p=2, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+        
+        # P2
+        self.layer_2 = nn.Sequential(
+            Conv(int(64*width), int(128*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(int(128*width), int(128*width), expand_ratio=0.5, nblocks=int(3*depth),
+                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+        )
+        # P3
+        self.layer_3 = nn.Sequential(
+            Conv(int(128*width), int(256*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(int(256*width), int(256*width), expand_ratio=0.5, nblocks=int(9*depth),
+                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+        )
+        # P4
+        self.layer_4 = nn.Sequential(
+            Conv(int(256*width), int(512*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(int(512*width), int(512*width), expand_ratio=0.5, nblocks=int(9*depth),
+                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+        )
+        # P5
+        self.layer_5 = nn.Sequential(
+            Conv(int(512*width), int(1024*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            SPPF(int(1024*width), int(1024*width), expand_ratio=0.5),
+            CSPBlock(int(1024*width), int(1024*width), expand_ratio=0.5, nblocks=int(3*depth),
+                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+        )
+
+
+    def forward(self, x):
+        c1 = self.layer_1(x)
+        c2 = self.layer_2(c1)
+        c3 = self.layer_3(c2)
+        c4 = self.layer_4(c3)
+        c5 = self.layer_5(c4)
+
+        outputs = [c3, c4, c5]
+
+        return outputs
+
+
+# ---------------------------- Functions ----------------------------
+## load pretrained weight
+def load_weight(model, model_name):
+    # load weight
+    print('Loading pretrained weight ...')
+    url = model_urls[model_name]
+    if url is not None:
+        checkpoint = torch.hub.load_state_dict_from_url(
+            url=url, map_location="cpu", check_hash=True)
+        # checkpoint state dict
+        checkpoint_state_dict = checkpoint.pop("model")
+        # model state dict
+        model_state_dict = model.state_dict()
+        # check
+        for k in list(checkpoint_state_dict.keys()):
+            if k in model_state_dict:
+                shape_model = tuple(model_state_dict[k].shape)
+                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
+                if shape_model != shape_checkpoint:
+                    checkpoint_state_dict.pop(k)
+            else:
+                checkpoint_state_dict.pop(k)
+                print(k)
+
+        model.load_state_dict(checkpoint_state_dict, strict=False)
+    else:
+        print('No pretrained for {}'.format(model_name))
+
+    return model
+
+
+## build CSPDarkNet
+def build_backbone(cfg, pretrained=False): 
+    """Constructs a darknet-53 model.
+    Args:
+        pretrained (bool): If True, returns a model pre-trained on ImageNet
+    """
+    backbone = CSPDarkNet(cfg['depth'], cfg['width'], cfg['bk_act'], cfg['bk_norm'], cfg['bk_dpw'])
+    feat_dims = backbone.feat_dims
+
+    # check whether to load imagenet pretrained weight
+    if pretrained:
+        if cfg['width'] == 0.25 and cfg['depth'] == 0.34:
+            backbone = load_weight(backbone, model_name='cspdarknet_nano')
+        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
+            backbone = load_weight(backbone, model_name='cspdarknet_small')
+        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
+            backbone = load_weight(backbone, model_name='cspdarknet_medium')
+        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
+            backbone = load_weight(backbone, model_name='cspdarknet_large')
+        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
+            backbone = load_weight(backbone, model_name='cspdarknet_huge')
+
+    return backbone, feat_dims
+
+
+if __name__ == '__main__':
+    import time
+    from thop import profile
+    cfg = {
+        'pretrained': False,
+        'bk_act': 'lrelu',
+        'bk_norm': 'BN',
+        'bk_dpw': False,
+        'p6_feat': False,
+        'p7_feat': False,
+        'width': 1.0,
+        'depth': 1.0,
+    }
+    model, feats = build_backbone(cfg)
+    x = torch.randn(1, 3, 224, 224)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    for out in outputs:
+        print(out.shape)
+
+    x = torch.randn(1, 3, 224, 224)
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))
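With the stride-2 stem and four further stride-2 stages, the returned features c3/c4/c5 sit at strides 8/16/32, so for the 224x224 test input above and width = 1.0 the expected shapes are easy to sanity-check:

# Expected pyramid shapes for a 224x224 input at width = 1.0 (sketch, not captured output).
for c, s in zip((256, 512, 1024), (8, 16, 32)):
    print([1, c, 224 // s, 224 // s])    # [1, 256, 28, 28], [1, 512, 14, 14], [1, 1024, 7, 7]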

+ 165 - 0
models/yolov5/yolov5_basic.py

@@ -0,0 +1,165 @@
+import numpy as np
+import torch
+import torch.nn as nn
+
+
+# ---------------------------- 2D CNN ----------------------------
+class SiLU(nn.Module):
+    """export-friendly version of nn.SiLU()"""
+
+    @staticmethod
+    def forward(x):
+        return x * torch.sigmoid(x)
+
+
+def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
+    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
+
+    return conv
+
+
+def get_activation(act_type=None):
+    if act_type == 'relu':
+        return nn.ReLU(inplace=True)
+    elif act_type == 'lrelu':
+        return nn.LeakyReLU(0.1, inplace=True)
+    elif act_type == 'mish':
+        return nn.Mish(inplace=True)
+    elif act_type == 'silu':
+        return nn.SiLU(inplace=True)
+    elif act_type is None:
+        return nn.Identity()
+
+
+def get_norm(norm_type, dim):
+    if norm_type == 'BN':
+        return nn.BatchNorm2d(dim)
+    elif norm_type == 'GN':
+        return nn.GroupNorm(num_groups=32, num_channels=dim)
+
+
+# Basic conv layer
+class Conv(nn.Module):
+    def __init__(self, 
+                 c1,                   # in channels
+                 c2,                   # out channels 
+                 k=1,                  # kernel size 
+                 p=0,                  # padding
+                 s=1,                  # stride
+                 d=1,                  # dilation
+                 act_type='lrelu',     # activation
+                 norm_type='BN',       # normalization
+                 depthwise=False):
+        super(Conv, self).__init__()
+        convs = []
+        add_bias = False if norm_type else True
+        if depthwise:
+            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
+            # depthwise conv
+            if norm_type:
+                convs.append(get_norm(norm_type, c1))
+            if act_type:
+                convs.append(get_activation(act_type))
+            # pointwise conv
+            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
+            if norm_type:
+                convs.append(get_norm(norm_type, c2))
+            if act_type:
+                convs.append(get_activation(act_type))
+
+        else:
+            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
+            if norm_type:
+                convs.append(get_norm(norm_type, c2))
+            if act_type:
+                convs.append(get_activation(act_type))
+            
+        self.convs = nn.Sequential(*convs)
+
+
+    def forward(self, x):
+        return self.convs(x)
+
+
+# ---------------------------- YOLOv5 Modules ----------------------------
+## BottleNeck
+class Bottleneck(nn.Module):
+    def __init__(self,
+                 in_dim,
+                 out_dim,
+                 expand_ratio=0.5,
+                 shortcut=False,
+                 depthwise=False,
+                 act_type='silu',
+                 norm_type='BN'):
+        super(Bottleneck, self).__init__()
+        inter_dim = int(out_dim * expand_ratio)  # hidden channels            
+        self.cv1 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
+        self.cv2 = Conv(inter_dim, out_dim, k=3, p=1, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
+        self.shortcut = shortcut and in_dim == out_dim
+
+    def forward(self, x):
+        h = self.cv2(self.cv1(x))
+
+        return x + h if self.shortcut else h
+
+## CSP-stage block
+class CSPBlock(nn.Module):
+    def __init__(self,
+                 in_dim,
+                 out_dim,
+                 expand_ratio=0.5,
+                 nblocks=1,
+                 shortcut=False,
+                 depthwise=False,
+                 act_type='silu',
+                 norm_type='BN'):
+        super(CSPBlock, self).__init__()
+        inter_dim = int(out_dim * expand_ratio)
+        self.cv1 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
+        self.cv2 = Conv(in_dim, inter_dim, k=1, norm_type=norm_type, act_type=act_type)
+        self.cv3 = Conv(2 * inter_dim, out_dim, k=1, norm_type=norm_type, act_type=act_type)
+        self.m = nn.Sequential(*[
+            Bottleneck(inter_dim, inter_dim, expand_ratio=1.0, shortcut=shortcut,
+                       norm_type=norm_type, act_type=act_type, depthwise=depthwise)
+                       for _ in range(nblocks)
+                       ])
+
+    def forward(self, x):
+        x1 = self.cv1(x)
+        x2 = self.cv2(x)
+        x3 = self.m(x1)
+        out = self.cv3(torch.cat([x3, x2], dim=1))
+
+        return out
+    
+
+# ---------------------------- FPN Modules ----------------------------
+## build fpn's core block
+def build_fpn_block(cfg, in_dim, out_dim):
+    if cfg['fpn_core_block'] == 'CSPBlock':
+        layer = CSPBlock(in_dim=in_dim,
+                         out_dim=out_dim,
+                         expand_ratio=0.5,
+                         nblocks = round(3*cfg['depth']),
+                         shortcut = False,
+                         act_type=cfg['fpn_act'],
+                         norm_type=cfg['fpn_norm'],
+                         depthwise=cfg['fpn_depthwise']
+                         )
+        
+    return layer
+
+## build fpn's reduce layer
+def build_reduce_layer(cfg, in_dim, out_dim):
+    if cfg['fpn_reduce_layer'] == 'Conv':
+        layer = Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
+        
+    return layer
+
+## build fpn's downsample layer
+def build_downsample_layer(cfg, in_dim, out_dim):
+    if cfg['fpn_downsample_layer'] == 'Conv':
+        layer = Conv(in_dim, out_dim, k=3, s=2, p=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
+        
+    return layer
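A quick standalone check of the CSP block's shape contract — the import path is an assumption, mirroring the try/except fallback used in yolov5_backbone.py (i.e. run from models/yolov5/):

import torch
from yolov5_basic import CSPBlock    # path assumed

block = CSPBlock(in_dim=128, out_dim=128, expand_ratio=0.5, nblocks=3, shortcut=True)
x = torch.randn(1, 128, 40, 40)
print(block(x).shape)                # torch.Size([1, 128, 40, 40]): channels and spatial size preserved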

+ 136 - 0
models/yolov5/yolov5_head.py

@@ -0,0 +1,136 @@
+import torch
+import torch.nn as nn
+try:
+    from .yolov5_basic import Conv
+except:
+    from yolov5_basic import Conv
+
+
+class DecoupledHead(nn.Module):
+    def __init__(self, cfg, in_dim, out_dim, num_classes=80):
+        super().__init__()
+        print('==============================')
+        print('Head: Decoupled Head')
+        # --------- Basic Parameters ----------
+        self.in_dim = in_dim
+        self.num_cls_head=cfg['num_cls_head']
+        self.num_reg_head=cfg['num_reg_head']
+
+        # --------- Network Parameters ----------
+        ## cls head
+        cls_feats = []
+        self.cls_out_dim = max(out_dim, num_classes)
+        for i in range(cfg['num_cls_head']):
+            if i == 0:
+                cls_feats.append(
+                    Conv(in_dim, self.cls_out_dim, k=3, p=1, s=1, 
+                        act_type=cfg['head_act'],
+                        norm_type=cfg['head_norm'],
+                        depthwise=cfg['head_depthwise'])
+                        )
+            else:
+                cls_feats.append(
+                    Conv(self.cls_out_dim, self.cls_out_dim, k=3, p=1, s=1, 
+                        act_type=cfg['head_act'],
+                        norm_type=cfg['head_norm'],
+                        depthwise=cfg['head_depthwise'])
+                        )      
+        ## reg head
+        reg_feats = []
+        self.reg_out_dim = max(out_dim, 64)
+        for i in range(cfg['num_reg_head']):
+            if i == 0:
+                reg_feats.append(
+                    Conv(in_dim, self.reg_out_dim, k=3, p=1, s=1, 
+                        act_type=cfg['head_act'],
+                        norm_type=cfg['head_norm'],
+                        depthwise=cfg['head_depthwise'])
+                        )
+            else:
+                reg_feats.append(
+                    Conv(self.reg_out_dim, self.reg_out_dim, k=3, p=1, s=1, 
+                        act_type=cfg['head_act'],
+                        norm_type=cfg['head_norm'],
+                        depthwise=cfg['head_depthwise'])
+                        )
+
+        self.cls_feats = nn.Sequential(*cls_feats)
+        self.reg_feats = nn.Sequential(*reg_feats)
+
+
+    def forward(self, x):
+        """
+            in_feats: (Tensor) [B, C, H, W]
+        """
+        cls_feats = self.cls_feats(x)
+        reg_feats = self.reg_feats(x)
+
+        return cls_feats, reg_feats
+    
+
+# build detection head
+def build_head(cfg, in_dim, out_dim, num_classes=80):
+    head = DecoupledHead(cfg, in_dim, out_dim, num_classes) 
+
+    return head
+
+
+if __name__ == '__main__':
+    import time
+    from thop import profile
+    cfg = {
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'head_depthwise': False,
+        'reg_max': 16,
+    }
+    fpn_dims = [256, 512, 512]
+    # Head-1
+    model = build_head(cfg, fpn_dims[0], fpn_dims[0], num_classes=80)  # out_dim must be an int, not the whole fpn_dims list
+    x = torch.randn(1, 256, 80, 80)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-1: Params : {:.2f} M'.format(params / 1e6))
+
+    # Head-2
+    model = build_head(cfg, fpn_dims[1], fpn_dims[1], num_classes=80)
+    x = torch.randn(1, 512, 40, 40)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-2: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-2: Params : {:.2f} M'.format(params / 1e6))
+
+    # Head-3
+    model = build_head(cfg, fpn_dims[2], fpn_dims[2], num_classes=80)
+    x = torch.randn(1, 512, 20, 20)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-3: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-3: Params : {:.2f} M'.format(params / 1e6))

+ 98 - 0
models/yolov5/yolov5_neck.py

@@ -0,0 +1,98 @@
+import torch
+import torch.nn as nn
+from .yolov5_basic import Conv
+
+
+# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
+class SPPF(nn.Module):
+    """
+        Adapted from https://github.com/ultralytics/yolov5
+    """
+    def __init__(self, in_dim, out_dim, expand_ratio=0.5, pooling_size=5, act_type='silu', norm_type='BN'):
+        super().__init__()
+        inter_dim = int(in_dim * expand_ratio)
+        self.out_dim = out_dim
+        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.m = nn.MaxPool2d(kernel_size=pooling_size, stride=1, padding=pooling_size // 2)
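+        # stacking this pool three times in forward() covers receptive fields of
+        # 5, 9 and 13 (for the default pooling_size=5), i.e. the classic SPP
+        # kernel set at a lower cost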
+
+    def forward(self, x):
+        x = self.cv1(x)
+        y1 = self.m(x)
+        y2 = self.m(y1)
+
+        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
+
+
+# SPPF block with CSP module
+class SPPFBlockCSP(nn.Module):
+    """
+        CSP Spatial Pyramid Pooling Block
+    """
+    def __init__(self,
+                 in_dim,
+                 out_dim,
+                 expand_ratio=0.5,
+                 pooling_size=5,
+                 act_type='silu',
+                 norm_type='BN',
+                 depthwise=False
+                 ):
+        super(SPPFBlockCSP, self).__init__()
+        inter_dim = int(in_dim * expand_ratio)
+        self.out_dim = out_dim
+        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.cv2 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.m = nn.Sequential(
+            Conv(inter_dim, inter_dim, k=3, p=1, 
+                 act_type=act_type, norm_type=norm_type, 
+                 depthwise=depthwise),
+            SPPF(inter_dim, 
+                 inter_dim, 
+                 expand_ratio=1.0, 
+                 pooling_size=pooling_size, 
+                 act_type=act_type, 
+                 norm_type=norm_type),
+            Conv(inter_dim, inter_dim, k=3, p=1, 
+                 act_type=act_type, norm_type=norm_type, 
+                 depthwise=depthwise)
+        )
+        self.cv3 = Conv(inter_dim * 2, self.out_dim, k=1, act_type=act_type, norm_type=norm_type)
+
+        
+    def forward(self, x):
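+        # CSP-style split: cv1 is the shortcut branch, cv2 -> m is the SPPF branch;
+        # the two halves are concatenated and fused by cv3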
+        x1 = self.cv1(x)
+        x2 = self.cv2(x)
+        x3 = self.m(x2)
+        y = self.cv3(torch.cat([x1, x3], dim=1))
+
+        return y
+
+
+def build_neck(cfg, in_dim, out_dim):
+    model = cfg['neck']
+    print('==============================')
+    print('Neck: {}'.format(model))
+    # build neck
+    if model == 'sppf':
+        neck = SPPF(
+            in_dim=in_dim,
+            out_dim=out_dim,
+            expand_ratio=cfg['expand_ratio'], 
+            pooling_size=cfg['pooling_size'],
+            act_type=cfg['neck_act'],
+            norm_type=cfg['neck_norm']
+            )
+    elif model == 'csp_sppf':
+        neck = SPPFBlockCSP(
+            in_dim=in_dim,
+            out_dim=out_dim,
+            expand_ratio=cfg['expand_ratio'], 
+            pooling_size=cfg['pooling_size'],
+            act_type=cfg['neck_act'],
+            norm_type=cfg['neck_norm'],
+            depthwise=cfg['neck_depthwise']
+            )
+
+    return neck
+        
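+
+if __name__ == '__main__':
+    # Minimal sanity-check sketch (not part of the original file): the config keys
+    # below mirror exactly those read by build_neck(); the values are assumed for
+    # illustration only. Because of the relative Conv import, run this as a module,
+    # e.g. python -m models.yolov5.yolov5_neck.
+    cfg = {
+        'neck': 'csp_sppf',
+        'expand_ratio': 0.5,
+        'pooling_size': 5,
+        'neck_act': 'silu',
+        'neck_norm': 'BN',
+        'neck_depthwise': False,
+    }
+    model = build_neck(cfg, in_dim=512, out_dim=512)
+    x = torch.randn(1, 512, 20, 20)
+    y = model(x)
+    print(y.shape)  # expected: torch.Size([1, 512, 20, 20])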

+ 92 - 0
models/yolov5/yolov5_pafpn.py

@@ -0,0 +1,92 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .yolov5_basic import (Conv, build_reduce_layer, build_downsample_layer, build_fpn_block)
+
+
+# YOLO-Style PaFPN
+class Yolov5PaFPN(nn.Module):
+    def __init__(self, cfg, in_dims=[256, 512, 1024], out_dim=None):
+        super(Yolov5PaFPN, self).__init__()
+        # --------------------------- Basic Parameters ---------------------------
+        self.in_dims = in_dims
+        c3, c4, c5 = in_dims
+        width = cfg['width']
+
+        # --------------------------- Network Parameters ---------------------------
+        ## top down
+        ### P5 -> P4
+        self.reduce_layer_1 = build_reduce_layer(cfg, c5, round(512*width))
+        self.top_down_layer_1 = build_fpn_block(cfg, c4 + round(512*width), round(512*width))
+
+        ### P4 -> P3
+        self.reduce_layer_2 = build_reduce_layer(cfg, round(512*width), round(256*width))
+        self.top_down_layer_2 = build_fpn_block(cfg, c3 + round(256*width), round(256*width))
+
+        ## bottom up
+        ### P3 -> P4
+        self.downsample_layer_1 = build_downsample_layer(cfg, round(256*width), round(256*width))
+        self.bottom_up_layer_1 = build_fpn_block(cfg, round(256*width) + round(256*width), round(512*width))
+
+        ### P4 -> P5
+        self.downsample_layer_2 = build_downsample_layer(cfg, round(512*width), round(512*width))
+        self.bottom_up_layer_2 = build_fpn_block(cfg, round(512*width) + round(512*width), round(1024*width))
+                
+        ## output proj layers
+        if out_dim is not None:
+            self.out_layers = nn.ModuleList([
+                Conv(in_dim, out_dim, k=1,
+                     act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
+                     for in_dim in [round(256*width), round(512*width), round(1024*width)]
+                     ])
+            self.out_dim = [out_dim] * 3
+        else:
+            self.out_layers = None
+            self.out_dim = [round(256*width), round(512*width), round(1024*width)]
+
+
+    def forward(self, features):
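+        # features: [c3, c4, c5] backbone outputs at strides 8 / 16 / 32 (P3 / P4 / P5)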
+        c3, c4, c5 = features
+
+        # Top down
+        ## P5 -> P4
+        c6 = self.reduce_layer_1(c5)
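+        ## 2x upsampling (F.interpolate defaults to nearest-neighbor, as in the official YOLOv5)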
+        c7 = F.interpolate(c6, scale_factor=2.0)
+        c8 = torch.cat([c7, c4], dim=1)
+        c9 = self.top_down_layer_1(c8)
+        ## P4 -> P3
+        c10 = self.reduce_layer_2(c9)
+        c11 = F.interpolate(c10, scale_factor=2.0)
+        c12 = torch.cat([c11, c3], dim=1)
+        c13 = self.top_down_layer_2(c12)
+
+        # Bottom up
+        ## P3 -> P4
+        c14 = self.downsample_layer_1(c13)
+        c15 = torch.cat([c14, c10], dim=1)
+        c16 = self.bottom_up_layer_1(c15)
+        ## P4 -> P5
+        c17 = self.downsample_layer_2(c16)
+        c18 = torch.cat([c17, c6], dim=1)
+        c19 = self.bottom_up_layer_2(c18)
+
+        out_feats = [c13, c16, c19] # [P3, P4, P5]
+        
+        # output proj layers
+        if self.out_layers is not None:
+            out_feats_proj = []
+            for feat, layer in zip(out_feats, self.out_layers):
+                out_feats_proj.append(layer(feat))
+            return out_feats_proj
+
+        return out_feats
+
+
+def build_fpn(cfg, in_dims, out_dim=None):
+    model = cfg['fpn']
+    # build pafpn
+    if model == 'yolov5_pafpn':
+        fpn_net = Yolov5PaFPN(cfg, in_dims, out_dim)
+
+    return fpn_net

+ 2 - 2
train.sh

@@ -3,7 +3,7 @@ python train.py \
         --cuda \
         -d coco \
         --root /mnt/share/ssd2/dataset/ \
-        -m yolov8_nano \
+        -m yolov5_l \
         -bs 16 \
         -size 640 \
         --wp_epoch 1 \
@@ -12,7 +12,7 @@ python train.py \
         --ema \
         --fp16 \
         --multi_scale \
-        # --resume weights/coco/yolov7_large/yolov7_large_epoch_151_44.30.pth \
+        --resume weights/coco/yolov5_l/yolov5_l_epoch_221_46.56.pth \
         # --pretrained weights/coco/yolo_free_medium/yolo_free_medium_39.46.pth \
         # --eval_first