
use RTCDet trainer for our YOLOv5

冬落 2 years ago
parent
commit
8785462882

+ 2 - 2
config/model_config/yolov3_config.py

@@ -45,7 +45,7 @@ yolov3_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
     'yolov3_tiny':{
@@ -92,7 +92,7 @@ yolov3_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
 }

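The only change to the v3/v4 configs is the trainer key. A minimal sketch of how a string key like this is typically dispatched to a trainer class; build_trainer, RTCDetTrainer and Yolov8Trainer below are illustrative placeholders, not this repository's actual API:

    # Hypothetical dispatch sketch; class and function names are placeholders.
    class RTCDetTrainer:
        def __init__(self, cfg):
            self.cfg = cfg

    class Yolov8Trainer:
        def __init__(self, cfg):
            self.cfg = cfg

    TRAINERS = {'rtcdet': RTCDetTrainer, 'yolov8': Yolov8Trainer}

    def build_trainer(cfg):
        # Look up the trainer class named by the config entry.
        try:
            return TRAINERS[cfg['trainer_type']](cfg)
        except KeyError:
            raise ValueError("Unknown trainer_type: {}".format(cfg['trainer_type']))

    # After this commit the yolov3/v4/v5 configs all resolve to the RTCDet trainer.
    trainer = build_trainer({'trainer_type': 'rtcdet'})
    print(type(trainer).__name__)  # RTCDetTrainer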
+ 2 - 2
config/model_config/yolov4_config.py

@@ -45,7 +45,7 @@ yolov4_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
     'yolov4_tiny':{
@@ -92,7 +92,7 @@ yolov4_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
 }

+ 10 - 15
config/model_config/yolov5_config.py

@@ -5,7 +5,6 @@ yolov5_cfg = {
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': True,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -33,7 +32,7 @@ yolov5_cfg = {
                         [116, 90],  [156, 198], [373, 326]],  # P5
         # ---------------- Train config ----------------
         ## input
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'multi_scale': [0.5, 1.5],   # 320 -> 960
         'trans_type': 'yolov5_nano',
         # ---------------- Assignment config ----------------
         ## matcher
@@ -44,14 +43,13 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
     'yolov5_s':{
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': True,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -79,7 +77,7 @@ yolov5_cfg = {
                         [116, 90],  [156, 198], [373, 326]],  # P5
         # ---------------- Train config ----------------
         ## input
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'multi_scale': [0.5, 1.5],   # 320 -> 960
         'trans_type': 'yolov5_small',
         # ---------------- Assignment config ----------------
         ## matcher
@@ -90,14 +88,13 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
     'yolov5_m':{
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': True,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -125,7 +122,7 @@ yolov5_cfg = {
                         [116, 90],  [156, 198], [373, 326]],  # P5
         # ---------------- Train config ----------------
         ## input
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'multi_scale': [0.5, 1.5],   # 320 -> 960
         'trans_type': 'yolov5_medium',
         # ---------------- Assignment config ----------------
         ## matcher
@@ -136,14 +133,13 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
     'yolov5_l':{
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': True,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -171,7 +167,7 @@ yolov5_cfg = {
                         [116, 90],  [156, 198], [373, 326]],  # P5
         # ---------------- Train config ----------------
         ## input
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'multi_scale': [0.5, 1.5],   # 320 -> 960
         'trans_type': 'yolov5_large',
         # ---------------- Assignment config ----------------
         ## matcher
@@ -182,14 +178,13 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
     'yolov5_x':{
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': True,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -217,7 +212,7 @@ yolov5_cfg = {
                         [116, 90],  [156, 198], [373, 326]],  # P5
         # ---------------- Train config ----------------
         ## input
-        'multi_scale': [0.5, 1.0],   # 320 -> 640
+        'multi_scale': [0.5, 1.5],   # 320 -> 960
         'trans_type': 'yolov5_huge',
         # ---------------- Assignment config ----------------
         ## matcher
@@ -228,7 +223,7 @@ yolov5_cfg = {
         'loss_cls_weight': 1.0,
         'loss_box_weight': 5.0,
         # ---------------- Train config ----------------
-        'trainer_type': 'yolov8',
+        'trainer_type': 'rtcdet',
     },
 
 }

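multi_scale now samples scale factors from [0.5, 1.5] instead of [0.5, 1.0]. Assuming the usual 640-pixel base resolution and stride-32 alignment (assumptions here, not read from the repository's dataloader), that widens the training sizes from 320-640 to 320-960:

    import random

    def sample_train_size(base_size=640, multi_scale=(0.5, 1.5), stride=32):
        # Scale the base size by the low/high factors and keep multiples of the stride.
        low  = int(base_size * multi_scale[0]) // stride   # 320 // 32 = 10
        high = int(base_size * multi_scale[1]) // stride   # 960 // 32 = 30
        return random.randint(low, high) * stride          # multiple of 32 in [320, 960]

    print(sample_train_size())                        # somewhere in 320..960
    print(sample_train_size(multi_scale=(0.5, 1.0)))  # old range: 320..640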
+ 0 - 5
config/model_config/yolox_config.py

@@ -6,7 +6,6 @@ yolox_cfg = {
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': True,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -50,7 +49,6 @@ yolox_cfg = {
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': True,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -94,7 +92,6 @@ yolox_cfg = {
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': False,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -138,7 +135,6 @@ yolox_cfg = {
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': False,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,
@@ -182,7 +178,6 @@ yolox_cfg = {
         # ---------------- Model config ----------------
         ## Backbone
         'backbone': 'cspdarknet',
-        'pretrained': False,
         'bk_act': 'silu',
         'bk_norm': 'BN',
         'bk_dpw': False,

+ 7 - 7
models/detectors/yolov5/yolov5.py

@@ -12,12 +12,12 @@ class YOLOv5(nn.Module):
     def __init__(self, 
                  cfg,
                  device, 
-                 num_classes = 20, 
-                 conf_thresh = 0.05,
-                 nms_thresh = 0.6,
-                 trainable = False, 
-                 topk = 1000,
-                 deploy = False,
+                 num_classes = 20,
+                 conf_thresh = 0.01,
+                 nms_thresh  = 0.5,
+                 topk        = 1000,
+                 trainable   = False,
+                 deploy      = False,
                  nms_class_agnostic = False):
         super(YOLOv5, self).__init__()
         # ---------------------- Basic Parameters ----------------------
@@ -41,7 +41,7 @@ class YOLOv5(nn.Module):
         
         # ------------------- Network Structure -------------------
         ## Backbone
-        self.backbone, feats_dim = build_backbone(cfg, trainable&cfg['pretrained'])
+        self.backbone, feats_dim = build_backbone(cfg)
         
         ## FPN
         self.fpn = build_fpn(cfg=cfg, in_dims=feats_dim, out_dim=round(256*cfg['width']))

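Dropping the 'pretrained' key from the configs is what forces the call-site change above: the removed expression trainable & cfg['pretrained'] would now raise a KeyError. A tiny standalone illustration with a toy config dict (the real cfg lives in yolov5_config.py):

    # Toy config for illustration only; note there is no 'pretrained' key anymore.
    cfg = {'backbone': 'cspdarknet', 'bk_act': 'silu'}
    trainable = True
    try:
        flag = trainable & cfg['pretrained']   # the removed call-site expression
    except KeyError as err:
        print('old call site would fail with KeyError:', err)
    # The new call site simply passes the config: build_backbone(cfg)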
+ 46 - 78
models/detectors/yolov5/yolov5_backbone.py

@@ -8,47 +8,62 @@ except:
     from yolov5_basic import Conv, CSPBlock
     from yolov5_neck import SPPF
 
-model_urls = {
-    "cspdarknet_nano": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_nano.pth",
-    "cspdarknet_small": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_small.pth",
-    "cspdarknet_medium": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_medium.pth",
-    "cspdarknet_large": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_large.pth",
-    "cspdarknet_huge": None,
-}
 
 # CSPDarkNet
 class CSPDarkNet(nn.Module):
     def __init__(self, depth=1.0, width=1.0, act_type='silu', norm_type='BN', depthwise=False):
         super(CSPDarkNet, self).__init__()
-        self.feat_dims = [int(256*width), int(512*width), int(1024*width)]
-
-        # P1
-        self.layer_1 = Conv(3, int(64*width), k=6, p=2, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        
-        # P2
+        self.feat_dims = [round(64 * width), round(128 * width), round(256 * width), round(512 * width), round(1024 * width)]
+        # P1/2
+        self.layer_1 = Conv(3, self.feat_dims[0], k=6, p=2, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+        # P2/4
         self.layer_2 = nn.Sequential(
-            Conv(int(64*width), int(128*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            CSPBlock(int(128*width), int(128*width), expand_ratio=0.5, nblocks=int(3*depth),
-                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            Conv(self.feat_dims[0], self.feat_dims[1], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(in_dim       = self.feat_dims[1],
+                     out_dim      = self.feat_dims[1],
+                     expand_ratio = 0.5,
+                     nblocks      = round(3*depth),
+                     shortcut     = True,
+                     act_type     = act_type,
+                     norm_type    = norm_type,
+                     depthwise    = depthwise)
         )
-        # P3
+        # P3/8
         self.layer_3 = nn.Sequential(
-            Conv(int(128*width), int(256*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            CSPBlock(int(256*width), int(256*width), expand_ratio=0.5, nblocks=int(9*depth),
-                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            Conv(self.feat_dims[1], self.feat_dims[2], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(in_dim       = self.feat_dims[2],
+                     out_dim      = self.feat_dims[2],
+                     expand_ratio = 0.5,
+                     nblocks      = round(9*depth),
+                     shortcut     = True,
+                     act_type     = act_type,
+                     norm_type    = norm_type,
+                     depthwise    = depthwise)
         )
-        # P4
+        # P4/16
         self.layer_4 = nn.Sequential(
-            Conv(int(256*width), int(512*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            CSPBlock(int(512*width), int(512*width), expand_ratio=0.5, nblocks=int(9*depth),
-                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            Conv(self.feat_dims[2], self.feat_dims[3], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(in_dim       = self.feat_dims[3],
+                     out_dim      = self.feat_dims[3],
+                     expand_ratio = 0.5,
+                     nblocks      = round(9*depth),
+                     shortcut     = True,
+                     act_type     = act_type,
+                     norm_type    = norm_type,
+                     depthwise    = depthwise)
         )
-        # P5
+        # P5/32
         self.layer_5 = nn.Sequential(
-            Conv(int(512*width), int(1024*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            SPPF(int(1024*width), int(1024*width), expand_ratio=0.5),
-            CSPBlock(int(1024*width), int(1024*width), expand_ratio=0.5, nblocks=int(3*depth),
-                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            Conv(self.feat_dims[3], self.feat_dims[4], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            SPPF(self.feat_dims[4], self.feat_dims[4], expand_ratio=0.5),
+            CSPBlock(in_dim       = self.feat_dims[4],
+                     out_dim      = self.feat_dims[4],
+                     expand_ratio = 0.5,
+                     nblocks      = round(3*depth),
+                     shortcut     = True,
+                     act_type     = act_type,
+                     norm_type    = norm_type,
+                     depthwise    = depthwise)
         )
 
 
@@ -65,57 +80,10 @@ class CSPDarkNet(nn.Module):
 
 
 # ---------------------------- Functions ----------------------------
-## load pretrained weight
-def load_weight(model, model_name):
-    # load weight
-    print('Loading pretrained weight ...')
-    url = model_urls[model_name]
-    if url is not None:
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print(k)
-
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No pretrained for {}'.format(model_name))
-
-    return model
-
-
 ## build CSPDarkNet
-def build_backbone(cfg, pretrained=False): 
-    """Constructs a darknet-53 model.
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
+def build_backbone(cfg): 
     backbone = CSPDarkNet(cfg['depth'], cfg['width'], cfg['bk_act'], cfg['bk_norm'], cfg['bk_dpw'])
-    feat_dims = backbone.feat_dims
-
-    # check whether to load imagenet pretrained weight
-    if pretrained:
-        if cfg['width'] == 0.25 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='cspdarknet_nano')
-        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='cspdarknet_small')
-        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
-            backbone = load_weight(backbone, model_name='cspdarknet_medium')
-        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
-            backbone = load_weight(backbone, model_name='cspdarknet_large')
-        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
-            backbone = load_weight(backbone, model_name='cspdarknet_huge')
+    feat_dims = backbone.feat_dims[-3:]
 
     return backbone, feat_dims
 

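The constructor now records all five stage widths, and build_backbone hands only the last three (P3/P4/P5) to the FPN. A quick check of what feat_dims[-3:] gives for width = 0.5, the small-model setting suggested by the removed pretrained-weight dispatch above:

    # Stage widths exactly as computed in CSPDarkNet.__init__, for width = 0.5.
    width = 0.5
    feat_dims = [round(64 * width), round(128 * width), round(256 * width),
                 round(512 * width), round(1024 * width)]
    print(feat_dims)        # [32, 64, 128, 256, 512]
    print(feat_dims[-3:])   # [128, 256, 512] -> P3/P4/P5 channels handed to the FPN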
+ 1 - 1
models/detectors/yolox/yolox.py

@@ -34,7 +34,7 @@ class YOLOX(nn.Module):
                 
         # ------------------- Network Structure -------------------
         ## 主干网络
-        self.backbone, feats_dim = build_backbone(cfg, trainable&cfg['pretrained'])
+        self.backbone, feats_dim = build_backbone(cfg)
         
         ## 特征金字塔
         self.fpn = build_fpn(cfg=cfg, in_dims=feats_dim, out_dim=round(256*cfg['width']))

+ 46 - 79
models/detectors/yolox/yolox_backbone.py

@@ -8,47 +8,62 @@ except:
     from yolox_basic import Conv, CSPBlock
     from yolox_neck import SPPF
 
-model_urls = {
-    "cspdarknet_nano": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_nano.pth",
-    "cspdarknet_small": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_small.pth",
-    "cspdarknet_medium": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_medium.pth",
-    "cspdarknet_large": "https://github.com/yjh0410/image_classification_pytorch/releases/download/weight/cspdarknet_large.pth",
-    "cspdarknet_huge": None,
-}
 
 # CSPDarkNet
 class CSPDarkNet(nn.Module):
     def __init__(self, depth=1.0, width=1.0, act_type='silu', norm_type='BN', depthwise=False):
         super(CSPDarkNet, self).__init__()
-        self.feat_dims = [int(256*width), int(512*width), int(1024*width)]
-
-        # P1
-        self.layer_1 = Conv(3, int(64*width), k=6, p=2, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        
-        # P2
+        self.feat_dims = [round(64 * width), round(128 * width), round(256 * width), round(512 * width), round(1024 * width)]
+        # P1/2
+        self.layer_1 = Conv(3, self.feat_dims[0], k=6, p=2, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+        # P2/4
         self.layer_2 = nn.Sequential(
-            Conv(int(64*width), int(128*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            CSPBlock(int(128*width), int(128*width), expand_ratio=0.5, nblocks=int(3*depth),
-                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            Conv(self.feat_dims[0], self.feat_dims[1], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(in_dim       = self.feat_dims[1],
+                     out_dim      = self.feat_dims[1],
+                     expand_ratio = 0.5,
+                     nblocks      = round(3*depth),
+                     shortcut     = True,
+                     act_type     = act_type,
+                     norm_type    = norm_type,
+                     depthwise    = depthwise)
         )
-        # P3
+        # P3/8
         self.layer_3 = nn.Sequential(
-            Conv(int(128*width), int(256*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            CSPBlock(int(256*width), int(256*width), expand_ratio=0.5, nblocks=int(9*depth),
-                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            Conv(self.feat_dims[1], self.feat_dims[2], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(in_dim       = self.feat_dims[2],
+                     out_dim      = self.feat_dims[2],
+                     expand_ratio = 0.5,
+                     nblocks      = round(9*depth),
+                     shortcut     = True,
+                     act_type     = act_type,
+                     norm_type    = norm_type,
+                     depthwise    = depthwise)
         )
-        # P4
+        # P4/16
         self.layer_4 = nn.Sequential(
-            Conv(int(256*width), int(512*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            CSPBlock(int(512*width), int(512*width), expand_ratio=0.5, nblocks=int(9*depth),
-                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            Conv(self.feat_dims[2], self.feat_dims[3], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            CSPBlock(in_dim       = self.feat_dims[3],
+                     out_dim      = self.feat_dims[3],
+                     expand_ratio = 0.5,
+                     nblocks      = round(9*depth),
+                     shortcut     = True,
+                     act_type     = act_type,
+                     norm_type    = norm_type,
+                     depthwise    = depthwise)
         )
-        # P5
+        # P5/32
         self.layer_5 = nn.Sequential(
-            Conv(int(512*width), int(1024*width), k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
-            SPPF(int(1024*width), int(1024*width), expand_ratio=0.5),
-            CSPBlock(int(1024*width), int(1024*width), expand_ratio=0.5, nblocks=int(3*depth),
-                     shortcut=True, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
+            Conv(self.feat_dims[3], self.feat_dims[4], k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise),
+            SPPF(self.feat_dims[4], self.feat_dims[4], expand_ratio=0.5),
+            CSPBlock(in_dim       = self.feat_dims[4],
+                     out_dim      = self.feat_dims[4],
+                     expand_ratio = 0.5,
+                     nblocks      = round(3*depth),
+                     shortcut     = True,
+                     act_type     = act_type,
+                     norm_type    = norm_type,
+                     depthwise    = depthwise)
         )
 
 
@@ -65,57 +80,10 @@ class CSPDarkNet(nn.Module):
 
 
 # ---------------------------- Functions ----------------------------
-## load pretrained weight
-def load_weight(model, model_name):
-    # load weight
-    print('Loading pretrained weight ...')
-    url = model_urls[model_name]
-    if url is not None:
-        checkpoint = torch.hub.load_state_dict_from_url(
-            url=url, map_location="cpu", check_hash=True)
-        # checkpoint state dict
-        checkpoint_state_dict = checkpoint.pop("model")
-        # model state dict
-        model_state_dict = model.state_dict()
-        # check
-        for k in list(checkpoint_state_dict.keys()):
-            if k in model_state_dict:
-                shape_model = tuple(model_state_dict[k].shape)
-                shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                if shape_model != shape_checkpoint:
-                    checkpoint_state_dict.pop(k)
-            else:
-                checkpoint_state_dict.pop(k)
-                print(k)
-
-        model.load_state_dict(checkpoint_state_dict)
-    else:
-        print('No pretrained for {}'.format(model_name))
-
-    return model
-
-
 ## build CSPDarkNet
-def build_backbone(cfg, pretrained=False): 
-    """Constructs a darknet-53 model.
-    Args:
-        pretrained (bool): If True, returns a model pre-trained on ImageNet
-    """
+def build_backbone(cfg): 
     backbone = CSPDarkNet(cfg['depth'], cfg['width'], cfg['bk_act'], cfg['bk_norm'], cfg['bk_dpw'])
-    feat_dims = backbone.feat_dims
-
-    # check whether to load imagenet pretrained weight
-    if pretrained:
-        if cfg['width'] == 0.25 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='cspdarknet_nano')
-        elif cfg['width'] == 0.5 and cfg['depth'] == 0.34:
-            backbone = load_weight(backbone, model_name='cspdarknet_small')
-        elif cfg['width'] == 0.75 and cfg['depth'] == 0.67:
-            backbone = load_weight(backbone, model_name='cspdarknet_medium')
-        elif cfg['width'] == 1.0 and cfg['depth'] == 1.0:
-            backbone = load_weight(backbone, model_name='cspdarknet_large')
-        elif cfg['width'] == 1.25 and cfg['depth'] == 1.34:
-            backbone = load_weight(backbone, model_name='cspdarknet_huge')
+    feat_dims = backbone.feat_dims[-3:]
 
     return backbone, feat_dims
 
@@ -124,7 +92,6 @@ if __name__ == '__main__':
     import time
     from thop import profile
     cfg = {
-        'pretrained': False,
         'bk_act': 'lrelu',
         'bk_norm': 'BN',
         'bk_dpw': False,

+ 16 - 2
train.sh

@@ -35,13 +35,27 @@ elif [[ $MODEL == *"yolov7"* ]]; then
     WP_EPOCH=3
     EVAL_EPOCH=10
     NO_AUG_EPOCH=20
-elif [[ $MODEL == *"yolov5"* || $MODEL == *"yolov4"* || $MODEL == *"yolov3"* ]]; then
+elif [[ $MODEL == *"yolov5"* ]]; then
     # Epoch setting
     BATCH_SIZE=128
     MAX_EPOCH=300
     WP_EPOCH=3
     EVAL_EPOCH=10
-    NO_AUG_EPOCH=15
+    NO_AUG_EPOCH=20
+elif [[ $MODEL == *"yolov4"* ]]; then
+    # Epoch setting
+    BATCH_SIZE=128
+    MAX_EPOCH=300
+    WP_EPOCH=3
+    EVAL_EPOCH=10
+    NO_AUG_EPOCH=20
+elif [[ $MODEL == *"yolov3"* ]]; then
+    # Epoch setting
+    BATCH_SIZE=128
+    MAX_EPOCH=300
+    WP_EPOCH=3
+    EVAL_EPOCH=10
+    NO_AUG_EPOCH=20
 else
     # Epoch setting
     BATCH_SIZE=128