Author: yjh0410 — committed 1 year ago
Parent commit: 1fd1e0a096

+ 0 - 9
yolo/config/yolov2_config.py

@@ -16,16 +16,7 @@ class Yolov2BaseConfig(object):
         ## Backbone
         self.backbone       = 'resnet50'
         self.use_pretrained = True
-        ## Neck
-        self.neck_act       = 'lrelu'
-        self.neck_norm      = 'BN'
-        self.neck_depthwise = False
-        self.neck_expand_ratio = 0.5
-        self.spp_pooling_size  = 5
         ## Head
-        self.head_act  = 'lrelu'
-        self.head_norm = 'BN'
-        self.head_depthwise = False
         self.head_dim  = 512
         self.num_cls_head = 2
         self.num_reg_head = 2

+ 2 - 3
yolo/models/yolov1/modules.py

@@ -1,10 +1,9 @@
 import torch
 import torch.nn as nn
-from typing import List
 
 
 # --------------------- Basic modules ---------------------
-class BasicConv(nn.Module):
+class ConvModule(nn.Module):
     def __init__(self, 
                  in_dim,                   # in channels
                  out_dim,                  # out channels 
@@ -13,7 +12,7 @@ class BasicConv(nn.Module):
                  stride=1,                 # padding
                  dilation=1,               # dilation
                 ):
-        super(BasicConv, self).__init__()
+        super(ConvModule, self).__init__()
         self.conv = nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False)
         self.norm = nn.BatchNorm2d(out_dim)
         self.act  = nn.LeakyReLU(0.1, inplace=True)

+ 6 - 6
yolo/models/yolov1/yolov1_head.py

@@ -2,9 +2,9 @@ import torch
 import torch.nn as nn
 
 try:
-    from .modules import BasicConv
+    from .modules import ConvModule
 except:
-    from  modules import BasicConv
+    from  modules import ConvModule
 
 
 class Yolov1DetHead(nn.Module):
@@ -22,16 +22,16 @@ class Yolov1DetHead(nn.Module):
         cls_feats = []
         for i in range(self.num_cls_head):
             if i == 0:
-                cls_feats.append(BasicConv(in_dim, self.cls_head_dim, kernel_size=3, padding=1, stride=1))
+                cls_feats.append(ConvModule(in_dim, self.cls_head_dim, kernel_size=3, padding=1, stride=1))
             else:
-                cls_feats.append(BasicConv(self.cls_head_dim, self.cls_head_dim, kernel_size=3, padding=1, stride=1))
+                cls_feats.append(ConvModule(self.cls_head_dim, self.cls_head_dim, kernel_size=3, padding=1, stride=1))
         ## reg head
         reg_feats = []
         for i in range(self.num_reg_head):
             if i == 0:
-                reg_feats.append(BasicConv(in_dim, self.reg_head_dim, kernel_size=3, padding=1, stride=1))
+                reg_feats.append(ConvModule(in_dim, self.reg_head_dim, kernel_size=3, padding=1, stride=1))
             else:
-                reg_feats.append(BasicConv(self.reg_head_dim, self.reg_head_dim, kernel_size=3, padding=1, stride=1))
+                reg_feats.append(ConvModule(self.reg_head_dim, self.reg_head_dim, kernel_size=3, padding=1, stride=1))
         self.cls_feats = nn.Sequential(*cls_feats)
         self.reg_feats = nn.Sequential(*reg_feats)
 

+ 4 - 4
yolo/models/yolov1/yolov1_neck.py

@@ -2,9 +2,9 @@ import torch
 import torch.nn as nn
 
 try:
-    from .modules import BasicConv
+    from .modules import ConvModule
 except:
-    from  modules import BasicConv
+    from  modules import ConvModule
 
 
 # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
@@ -18,8 +18,8 @@ class SPPF(nn.Module):
         inter_dim = in_dim // 2
         self.out_dim = out_dim
         ## ----------- Network Parameters -----------
-        self.cv1 = BasicConv(in_dim, inter_dim, kernel_size=1, padding=0, stride=1)
-        self.cv2 = BasicConv(inter_dim * 4, out_dim, kernel_size=1, padding=0, stride=1)
+        self.cv1 = ConvModule(in_dim, inter_dim, kernel_size=1, padding=0, stride=1)
+        self.cv2 = ConvModule(inter_dim * 4, out_dim, kernel_size=1, padding=0, stride=1)
         self.m = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
 
         # Initialize all layers

+ 23 - 0
yolo/models/yolov2/modules.py

@@ -0,0 +1,23 @@
+import torch
+import torch.nn as nn
+from typing import List
+
+
+# --------------------- Basic modules ---------------------
+class ConvModule(nn.Module):
+    def __init__(self, 
+                 in_dim,                   # in channels
+                 out_dim,                  # out channels 
+                 kernel_size=1,            # kernel size 
+                 padding=0,                # padding
+                 stride=1,                 # padding
+                 dilation=1,               # dilation
+                ):
+        super(ConvModule, self).__init__()
+        self.conv = nn.Conv2d(in_dim, out_dim, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False)
+        self.norm = nn.BatchNorm2d(out_dim)
+        self.act  = nn.LeakyReLU(0.1, inplace=True)
+
+    def forward(self, x):
+        return self.act(self.norm(self.conv(x)))
+

+ 0 - 69
yolo/models/yolov2/yolov2_basic.py

@@ -1,69 +0,0 @@
-import torch
-import torch.nn as nn
-from typing import List
-
-
-# --------------------- Basic modules ---------------------
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-        
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-    elif norm_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-class BasicConv(nn.Module):
-    def __init__(self, 
-                 in_dim,                   # in channels
-                 out_dim,                  # out channels 
-                 kernel_size=1,            # kernel size 
-                 padding=0,                # padding
-                 stride=1,                 # padding
-                 dilation=1,               # dilation
-                 act_type  :str = 'lrelu', # activation
-                 norm_type :str = 'BN',    # normalization
-                 depthwise :bool = False
-                ):
-        super(BasicConv, self).__init__()
-        self.depthwise = depthwise
-        use_bias = False if norm_type is not None else True
-        if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
-            self.norm = get_norm(norm_type, out_dim)
-        else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
-            self.norm1 = get_norm(norm_type, in_dim)
-            self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
-            self.norm2 = get_norm(norm_type, out_dim)
-        self.act  = get_activation(act_type)
-
-    def forward(self, x):
-        if not self.depthwise:
-            return self.act(self.norm(self.conv(x)))
-        else:
-            # Depthwise conv
-            x = self.norm1(self.conv1(x))
-            # Pointwise conv
-            x = self.act(self.norm2(self.conv2(x)))
-            return x

+ 6 - 35
yolo/models/yolov2/yolov2_head.py

@@ -2,9 +2,9 @@ import torch
 import torch.nn as nn
 
 try:
-    from .yolov2_basic import BasicConv
+    from .modules import ConvModule
 except:
-    from  yolov2_basic import BasicConv
+    from  modules import ConvModule
 
 
 class Yolov2DetHead(nn.Module):
@@ -16,49 +16,22 @@ class Yolov2DetHead(nn.Module):
         self.reg_head_dim = cfg.head_dim
         self.num_cls_head = cfg.num_cls_head
         self.num_reg_head = cfg.num_reg_head
-        self.act_type     = cfg.head_act
-        self.norm_type    = cfg.head_norm
-        self.depthwise    = cfg.head_depthwise
         
         # --------- Network Parameters ----------
         ## cls head
         cls_feats = []
         for i in range(self.num_cls_head):
             if i == 0:
-                cls_feats.append(
-                    BasicConv(in_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type  = self.act_type,
-                              norm_type = self.norm_type,
-                              depthwise = self.depthwise)
-                              )
+                cls_feats.append(ConvModule(in_dim, self.cls_head_dim, kernel_size=3, padding=1, stride=1))
             else:
-                cls_feats.append(
-                    BasicConv(self.cls_head_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type  = self.act_type,
-                              norm_type = self.norm_type,
-                              depthwise = self.depthwise)
-                              )
+                cls_feats.append(ConvModule(self.cls_head_dim, self.cls_head_dim, kernel_size=3, padding=1, stride=1))
         ## reg head
         reg_feats = []
         for i in range(self.num_reg_head):
             if i == 0:
-                reg_feats.append(
-                    BasicConv(in_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type  = self.act_type,
-                              norm_type = self.norm_type,
-                              depthwise = self.depthwise)
-                              )
+                reg_feats.append(ConvModule(in_dim, self.reg_head_dim, kernel_size=3, padding=1, stride=1))
             else:
-                reg_feats.append(
-                    BasicConv(self.reg_head_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type  = self.act_type,
-                              norm_type = self.norm_type,
-                              depthwise = self.depthwise)
-                              )
+                reg_feats.append(ConvModule(self.reg_head_dim, self.reg_head_dim, kernel_size=3, padding=1, stride=1))
         self.cls_feats = nn.Sequential(*cls_feats)
         self.reg_feats = nn.Sequential(*reg_feats)
 
@@ -68,8 +41,6 @@ class Yolov2DetHead(nn.Module):
         """Initialize the parameters."""
         for m in self.modules():
             if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
                 m.reset_parameters()
 
     def forward(self, x):

+ 6 - 14
yolo/models/yolov2/yolov2_neck.py

@@ -2,9 +2,9 @@ import torch
 import torch.nn as nn
 
 try:
-    from .yolov2_basic import BasicConv
+    from .modules import ConvModule
 except:
-    from  yolov2_basic import BasicConv
+    from  modules import ConvModule
 
 
 # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
@@ -15,18 +15,12 @@ class SPPF(nn.Module):
     def __init__(self, cfg, in_dim, out_dim):
         super().__init__()
         ## ----------- Basic Parameters -----------
-        inter_dim = round(in_dim * cfg.neck_expand_ratio)
+        inter_dim = in_dim // 2
         self.out_dim = out_dim
         ## ----------- Network Parameters -----------
-        self.cv1 = BasicConv(in_dim, inter_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.cv2 = BasicConv(inter_dim * 4, out_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.m = nn.MaxPool2d(kernel_size=cfg.spp_pooling_size,
-                              stride=1,
-                              padding=cfg.spp_pooling_size // 2)
+        self.cv1 = ConvModule(in_dim, inter_dim, kernel_size=1, padding=0, stride=1)
+        self.cv2 = ConvModule(inter_dim * 4, out_dim, kernel_size=1, padding=0, stride=1)
+        self.m = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
 
         # Initialize all layers
         self.init_weights()
@@ -35,8 +29,6 @@ class SPPF(nn.Module):
         """Initialize the parameters."""
         for m in self.modules():
             if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
                 m.reset_parameters()
 
     def forward(self, x):