
modify engine

yjh0410 2 years ago
parent
commit
a4fba8de72

+ 18 - 0
config/yolov1_config.py

@@ -3,6 +3,24 @@
 yolov1_cfg = {
     # input
     'trans_type': 'ssd',
+    # model
+    'backbone': 'resnet18',
+    'pretrained': True,
+    'stride': 32,  # P5
+    # neck
+    'neck': 'sppf',
+    'expand_ratio': 0.5,
+    'pooling_size': 5,
+    'neck_act': 'lrelu',
+    'neck_norm': 'BN',
+    'neck_depthwise': False,
+    # head
+    'head': 'decoupled_head',
+    'head_act': 'silu',
+    'head_norm': 'BN',
+    'num_cls_head': 2,
+    'num_reg_head': 2,
+    'head_depthwise': False,
     # loss weight
     'loss_obj_weight': 1.0,
     'loss_cls_weight': 1.0,

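As a quick orientation, the sketch below shows how these new keys flow into model assembly. This is a minimal sketch, assuming the repo root is on PYTHONPATH; feat_dim = 512 assumes the default resnet18 backbone, and the factory names match the diffs further down in this commit.

# Sketch: the new yolov1_cfg keys parameterize the three build stages.
# Values shown are the defaults added in this diff.
from config.yolov1_config import yolov1_cfg as cfg

feat_dim = 512                                   # resnet18 C5 channels
inter_dim = int(feat_dim * cfg['expand_ratio'])  # SPPF hidden dim: 256
print(cfg['backbone'], cfg['neck'], cfg['head'])
# resnet18 sppf decoupled_head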
+ 7 - 6
models/yolov1/build.py

@@ -14,12 +14,13 @@ def build_yolov1(args, cfg, device, num_classes=80, trainable=False):
     print('Model Configuration: \n', cfg)
     
     model = YOLOv1(
-        device=device,
-        img_size=args.img_size,
-        num_classes=num_classes,
-        conf_thresh=args.conf_thresh,
-        nms_thresh=args.nms_thresh,
-        trainable=trainable
+        cfg=cfg,
+        device=device,
+        img_size=args.img_size,
+        num_classes=num_classes,
+        conf_thresh=args.conf_thresh,
+        nms_thresh=args.nms_thresh,
+        trainable=trainable
         )
 
     criterion = None

+ 0 - 1
models/yolov1/loss.py

@@ -32,7 +32,6 @@ class Criterion(object):
 
     def loss_bboxes(self, pred_box, gt_box):
         # regression loss
-        print(pred_box, gt_box)
         ious = get_ious(pred_box,
                         gt_box,
                         box_mode="xyxy",

+ 40 - 50
models/yolov1/yolov1.py

@@ -4,14 +4,15 @@ import numpy as np
 
 from utils.nms import multiclass_nms
 
-from .yolov1_basic import Conv
-from .yolov1_neck import SPP
-from .yolov1_backbone import build_resnet
+from .yolov1_backbone import build_backbone
+from .yolov1_neck import build_neck
+from .yolov1_head import build_head
 
 
 # YOLOv1
 class YOLOv1(nn.Module):
     def __init__(self,
+                 cfg,
                  device,
                  img_size=None,
                  num_classes=20,
@@ -20,6 +21,7 @@ class YOLOv1(nn.Module):
                  trainable=False):
         super(YOLOv1, self).__init__()
         # ------------------- Basic parameters -------------------
+        self.cfg = cfg                                 # model config
         self.img_size = img_size                       # input image size
         self.device = device                           # cuda or cpu
         self.num_classes = num_classes                 # number of classes
@@ -30,24 +32,20 @@ class YOLOv1(nn.Module):
         
         # ------------------- Network Structure -------------------
         ## backbone: resnet18
-        self.backbone, feat_dim = build_resnet('resnet18', pretrained=trainable)
+        self.backbone, feat_dim = build_backbone(
+            cfg['backbone'], trainable and cfg['pretrained'])
 
         ## neck: SPP
-        self.neck = nn.Sequential(
-            SPP(),
-            Conv(feat_dim*4, feat_dim, k=1),
-        )
+        self.neck = build_neck(cfg, feat_dim, out_dim=256)
+        head_dim = self.neck.out_dim
 
         ## head
-        self.convsets = nn.Sequential(
-            Conv(feat_dim, feat_dim//2, k=1),
-            Conv(feat_dim//2, feat_dim, k=3, p=1),
-            Conv(feat_dim, feat_dim//2, k=1),
-            Conv(feat_dim//2, feat_dim, k=3, p=1)
-        )
+        self.head = build_head(cfg, head_dim, head_dim, num_classes)
 
         ## pred
-        self.pred = nn.Conv2d(feat_dim, 1 + self.num_classes + 4, 1)
+        self.obj_pred = nn.Conv2d(head_dim, 1, kernel_size=1)
+        self.cls_pred = nn.Conv2d(head_dim, num_classes, kernel_size=1)
+        self.reg_pred = nn.Conv2d(head_dim, 4, kernel_size=1)
     
 
         if self.trainable:
@@ -58,8 +56,8 @@ class YOLOv1(nn.Module):
         # init bias
         init_prob = 0.01
         bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
-        nn.init.constant_(self.pred.bias[..., :1], bias_value)
-        nn.init.constant_(self.pred.bias[..., 1:1+self.num_classes], bias_value)
+        nn.init.constant_(self.obj_pred.bias, bias_value)
+        nn.init.constant_(self.cls_pred.bias, bias_value)
 
 
     def create_grid(self, fmp_size):
@@ -90,7 +88,7 @@ class YOLOv1(nn.Module):
 
         # compute the center coordinates and width/height of the predicted boxes
         pred_ctr = (torch.sigmoid(pred[..., :2]) + grid_cell) * self.stride
-        pred_wh = torch.exp(pred[..., 2:])
+        pred_wh = torch.exp(pred[..., 2:]) * self.stride
 
         # convert all bbox centers and sizes to x1y1x2y2 format
         pred_x1y1 = pred_ctr - pred_wh * 0.5
@@ -129,30 +127,26 @@ class YOLOv1(nn.Module):
 
     @torch.no_grad()
     def inference(self, x):
         # backbone network
         feat = self.backbone(x)
 
         # neck network
         feat = self.neck(feat)
 
-        # detection head network
-        feat = self.convsets(feat)
+        # detection head
+        cls_feat, reg_feat = self.head(feat)
 
         # prediction layer
-        pred = self.pred(feat)
-        fmp_size = pred.shape[-2:]
+        obj_pred = self.obj_pred(cls_feat)
+        cls_pred = self.cls_pred(cls_feat)
+        reg_pred = self.reg_pred(reg_feat)
+        fmp_size = obj_pred.shape[-2:]
 
         # reshape pred for easier downstream processing
         # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]
-        pred = pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
-
-        # split pred into objectness, class, and bbox txtytwth predictions
-        # [B, H*W, 1]
-        obj_pred = pred[..., :1]
-        # [B, H*W, num_cls]
-        cls_pred = pred[..., 1:1+self.num_classes]
-        # [B, H*W, 4]
-        reg_pred = pred[..., 1+self.num_classes:]
+        obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
 
         # At test time, the batch size is assumed to be 1,
         # so the batch dimension is not needed; [0] removes it.
@@ -180,30 +174,26 @@ class YOLOv1(nn.Module):
         if not self.trainable:
             return self.inference(x)
         else:
             # backbone network
             feat = self.backbone(x)
 
             # neck network
             feat = self.neck(feat)
 
-            # detection head network
-            feat = self.convsets(feat)
+            # detection head
+            cls_feat, reg_feat = self.head(feat)
 
             # prediction layer
-            pred = self.pred(feat)
-            fmp_size = pred.shape[-2:]
+            obj_pred = self.obj_pred(cls_feat)
+            cls_pred = self.cls_pred(cls_feat)
+            reg_pred = self.reg_pred(reg_feat)
+            fmp_size = obj_pred.shape[-2:]
 
             # reshape pred for easier downstream processing
             # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]
-            pred = pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
-
-            # split pred into objectness, class, and bbox txtytwth predictions
-            # [B, H*W, 1]
-            obj_pred = pred[..., :1]
-            # [B, H*W, num_cls]
-            cls_pred = pred[..., 1:1+self.num_classes]
-            # [B, H*W, 4]
-            reg_pred = pred[..., 1+self.num_classes:]
+            obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+            cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
+            reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
 
             # decode bbox
             box_pred = self.decode_boxes(reg_pred, fmp_size)

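One behavioral change worth flagging in yolov1.py: decode_boxes now multiplies pred_wh by the stride, so width/height regressions are expressed in grid units rather than raw pixels. Below is a self-contained sketch of the updated decode math; decode_boxes_sketch is a hypothetical standalone helper that mirrors the diff, assuming the grid holds top-left cell coordinates as create_grid produces them.

import torch

def decode_boxes_sketch(reg_pred, fmp_size, stride=32):
    """Sketch of the updated decode: sigmoid(txty) + grid offset for centers,
    exp(twth) for sizes, both scaled by the stride (the '* self.stride' added above)."""
    H, W = fmp_size
    # grid of cell coordinates, shape [H*W, 2]; indexing='ij' needs torch>=1.10
    ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing='ij')
    grid_cell = torch.stack([xs, ys], dim=-1).float().view(-1, 2)

    pred_ctr = (torch.sigmoid(reg_pred[..., :2]) + grid_cell) * stride
    pred_wh = torch.exp(reg_pred[..., 2:]) * stride  # new: scaled by stride

    x1y1 = pred_ctr - pred_wh * 0.5
    x2y2 = pred_ctr + pred_wh * 0.5
    return torch.cat([x1y1, x2y2], dim=-1)

# A zero prediction at cell (0, 0) decodes to a 32x32 box centered at (16, 16):
boxes = decode_boxes_sketch(torch.zeros(1, 13 * 13, 4), (13, 13))
print(boxes[0, 0])  # tensor([ 0.,  0., 32., 32.])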
+ 2 - 2
models/yolov1/yolov1_backbone.py

@@ -222,7 +222,7 @@ def resnet152(pretrained=False, **kwargs):
     return model
 
 ## build resnet
-def build_resnet(model_name='resnet18', pretrained=False):
+def build_backbone(model_name='resnet18', pretrained=False):
     if model_name == 'resnet18':
         model = resnet18(pretrained)
         feat_dim = 512
@@ -240,7 +240,7 @@ def build_resnet(model_name='resnet18', pretrained=False):
 
 
 if __name__=='__main__':
-    model, feat_dim = build_resnet(model_name='resnet18', pretrained=True)
+    model, feat_dim = build_backbone(model_name='resnet18', pretrained=True)
     print(model)
 
     input = torch.randn(1, 3, 512, 512)

+ 68 - 7
models/yolov1/yolov1_basic.py

@@ -1,16 +1,77 @@
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 
 
+class SiLU(nn.Module):
+    """export-friendly version of nn.SiLU()"""
+
+    @staticmethod
+    def forward(x):
+        return x * torch.sigmoid(x)
+
+
+def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
+    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
+
+    return conv
+
+
+def get_activation(act_type=None):
+    if act_type == 'relu':
+        return nn.ReLU(inplace=True)
+    elif act_type == 'lrelu':
+        return nn.LeakyReLU(0.1, inplace=True)
+    elif act_type == 'mish':
+        return nn.Mish(inplace=True)
+    elif act_type == 'silu':
+        return nn.SiLU(inplace=True)
+    else:
+        raise NotImplementedError('unknown activation type: {}'.format(act_type))
+
+
+def get_norm(norm_type, dim):
+    if norm_type == 'BN':
+        return nn.BatchNorm2d(dim)
+    elif norm_type == 'GN':
+        return nn.GroupNorm(num_groups=32, num_channels=dim)
+    else:
+        raise NotImplementedError('unknown norm type: {}'.format(norm_type))
+
+
+# Basic conv layer
 class Conv(nn.Module):
-    def __init__(self, in_dim, out_dim, k, s=1, p=0, d=1, g=1, act=True):
+    def __init__(self, 
+                 c1,                   # in channels
+                 c2,                   # out channels 
+                 k=1,                  # kernel size 
+                 p=0,                  # padding
+                 s=1,                  # stride
+                 d=1,                  # dilation
+                 act_type='lrelu',     # activation
+                 norm_type='BN',       # normalization
+                 depthwise=False):
         super(Conv, self).__init__()
-        self.convs = nn.Sequential(
-            nn.Conv2d(in_dim, out_dim, k, stride=s, padding=p, dilation=d, groups=g),
-            nn.BatchNorm2d(out_dim),
-            nn.LeakyReLU(0.1, inplace=True) if act else nn.Identity()
-        )
+        convs = []
+        add_bias = False if norm_type else True
+        if depthwise:
+            # depthwise conv
+            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
+            if norm_type:
+                convs.append(get_norm(norm_type, c1))
+            if act_type:
+                convs.append(get_activation(act_type))
+            # pointwise conv
+            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
+            if norm_type:
+                convs.append(get_norm(norm_type, c2))
+            if act_type:
+                convs.append(get_activation(act_type))
+
+        else:
+            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
+            if norm_type:
+                convs.append(get_norm(norm_type, c2))
+            if act_type:
+                convs.append(get_activation(act_type))
+            
+        self.convs = nn.Sequential(*convs)
+
 
     def forward(self, x):
         return self.convs(x)

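A quick usage sketch of the refactored Conv block (shapes are illustrative; run from the repo root so the models.yolov1 import resolves). The depthwise variant keeps the same output shape but splits the 3x3 conv into a depthwise stage (groups=c1) plus a 1x1 pointwise projection:

import torch
from models.yolov1.yolov1_basic import Conv

x = torch.randn(1, 64, 32, 32)

# standard 3x3 conv: Conv2d -> BN -> LeakyReLU
conv = Conv(64, 128, k=3, p=1, s=1, act_type='lrelu', norm_type='BN')
print(conv(x).shape)   # torch.Size([1, 128, 32, 32])

# depthwise-separable: 3x3 depthwise (groups=64) + 1x1 pointwise
dw = Conv(64, 128, k=3, p=1, s=1, act_type='lrelu', norm_type='BN',
          depthwise=True)
print(dw(x).shape)     # torch.Size([1, 128, 32, 32])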
+ 137 - 0
models/yolov1/yolov1_head.py

@@ -0,0 +1,137 @@
+import torch
+import torch.nn as nn
+try:
+    from .yolov1_basic import Conv
+except ImportError:
+    from yolov1_basic import Conv
+
+
+class DecoupledHead(nn.Module):
+    def __init__(self, cfg, in_dim, out_dim, num_classes=80):
+        super().__init__()
+        print('==============================')
+        print('Head: Decoupled Head')
+        self.in_dim = in_dim
+        self.num_cls_head = cfg['num_cls_head']
+        self.num_reg_head = cfg['num_reg_head']
+        self.act_type = cfg['head_act']
+        self.norm_type = cfg['head_norm']
+
+        # cls head
+        cls_feats = []
+        self.cls_out_dim = max(out_dim, num_classes)
+        for i in range(cfg['num_cls_head']):
+            if i == 0:
+                cls_feats.append(
+                    Conv(in_dim, self.cls_out_dim, k=3, p=1, s=1,
+                         act_type=self.act_type,
+                         norm_type=self.norm_type,
+                         depthwise=cfg['head_depthwise'])
+                )
+            else:
+                cls_feats.append(
+                    Conv(self.cls_out_dim, self.cls_out_dim, k=3, p=1, s=1,
+                         act_type=self.act_type,
+                         norm_type=self.norm_type,
+                         depthwise=cfg['head_depthwise'])
+                )
+                
+        # reg head
+        reg_feats = []
+        self.reg_out_dim = max(out_dim, 64)
+        for i in range(cfg['num_reg_head']):
+            if i == 0:
+                reg_feats.append(
+                    Conv(in_dim, self.reg_out_dim, k=3, p=1, s=1,
+                         act_type=self.act_type,
+                         norm_type=self.norm_type,
+                         depthwise=cfg['head_depthwise'])
+                )
+            else:
+                reg_feats.append(
+                    Conv(self.reg_out_dim, self.reg_out_dim, k=3, p=1, s=1,
+                         act_type=self.act_type,
+                         norm_type=self.norm_type,
+                         depthwise=cfg['head_depthwise'])
+                )
+
+        self.cls_feats = nn.Sequential(*cls_feats)
+        self.reg_feats = nn.Sequential(*reg_feats)
+
+
+    def forward(self, x):
+        """
+            x: (Tensor) [B, C, H, W]
+        """
+        cls_feats = self.cls_feats(x)
+        reg_feats = self.reg_feats(x)
+
+        return cls_feats, reg_feats
+    
+
+# build detection head
+def build_head(cfg, in_dim, out_dim, num_classes=80):
+    head = DecoupledHead(cfg, in_dim, out_dim, num_classes) 
+
+    return head
+
+
+if __name__ == '__main__':
+    import time
+    from thop import profile
+    cfg = {
+        'num_cls_head': 2,
+        'num_reg_head': 2,
+        'head_act': 'silu',
+        'head_norm': 'BN',
+        'head_depthwise': False,
+        'reg_max': 16,
+    }
+    fpn_dims = [256, 512, 512]
+    # Head-1
+    model = build_head(cfg, fpn_dims[0], fpn_dims[0], num_classes=80)
+    x = torch.randn(1, 256, 80, 80)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-1: Params : {:.2f} M'.format(params / 1e6))
+
+    # Head-2
+    model = build_head(cfg, fpn_dims[1], fpn_dims[1], num_classes=80)
+    x = torch.randn(1, 512, 40, 40)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-2: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-2: Params : {:.2f} M'.format(params / 1e6))
+
+    # Head-3
+    model = build_head(cfg, fpn_dims[2], fpn_dims[2], num_classes=80)
+    x = torch.randn(1, 512, 20, 20)
+    t0 = time.time()
+    outputs = model(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    # for out in outputs:
+    #     print(out.shape)
+
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('Head-3: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Head-3: Params : {:.2f} M'.format(params / 1e6))

+ 25 - 23
models/yolov1/yolov1_neck.py

@@ -1,38 +1,40 @@
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
+from .yolov1_basic import Conv
 
 
-# Spatial Pyramid Pooling
-class SPP(nn.Module):
-    """
-        Spatial Pyramid Pooling
-    """
-    def __init__(self):
-        super(SPP, self).__init__()
+# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
+class SPPF(nn.Module):
+    def __init__(self, in_dim, out_dim, expand_ratio=0.5, pooling_size=5, act_type='lrelu', norm_type='BN'):
+        super().__init__()
+        inter_dim = int(in_dim * expand_ratio)
+        self.out_dim = out_dim
+        self.cv1 = Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.cv2 = Conv(inter_dim * 4, out_dim, k=1, act_type=act_type, norm_type=norm_type)
+        self.m = nn.MaxPool2d(kernel_size=pooling_size, stride=1, padding=pooling_size // 2)
 
     def forward(self, x):
-        """
-        Input:
-            x: (Tensor) -> [B, C, H, W]
-        Output:
-            y: (Tensor) -> [B, 4C, H, W]
-        """
-        x_1 = F.max_pool2d(x, 5, stride=1, padding=2)
-        x_2 = F.max_pool2d(x, 9, stride=1, padding=4)
-        x_3 = F.max_pool2d(x, 13, stride=1, padding=6)
-        y = torch.cat([x, x_1, x_2, x_3], dim=1)
+        x = self.cv1(x)
+        y1 = self.m(x)
+        y2 = self.m(y1)
 
-        return y
+        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
 
 
-def build_neck(cfg):
+def build_neck(cfg, in_dim, out_dim):
     model = cfg['neck']
     print('==============================')
     print('Neck: {}'.format(model))
     # build neck
-    if model == 'spp':
-        neck = SPP()
+    if model == 'sppf':
+        neck = SPPF(
+            in_dim=in_dim,
+            out_dim=out_dim,
+            expand_ratio=cfg['expand_ratio'], 
+            pooling_size=cfg['pooling_size'],
+            act_type=cfg['neck_act'],
+            norm_type=cfg['neck_norm']
+            )
+    else:
+        raise NotImplementedError('unknown neck type: {}'.format(model))
 
     return neck
-    
+
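A closing note on the neck swap: SPPF produces the same features as the old SPP because three chained 5x5 max-pools (stride 1, padding 2) cover the same receptive fields as the parallel 5/9/13 pools, while reusing intermediate results. A small sketch verifying the pooling equivalence (the added 1x1 convs are omitted; this should print True):

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 16, 16)

# old SPP: parallel max-pools with kernels 5, 9, 13
spp = torch.cat([x,
                 F.max_pool2d(x, 5, stride=1, padding=2),
                 F.max_pool2d(x, 9, stride=1, padding=4),
                 F.max_pool2d(x, 13, stride=1, padding=6)], dim=1)

# new SPPF: one 5x5 max-pool applied three times in sequence
y1 = F.max_pool2d(x, 5, stride=1, padding=2)
y2 = F.max_pool2d(y1, 5, stride=1, padding=2)
y3 = F.max_pool2d(y2, 5, stride=1, padding=2)
sppf = torch.cat([x, y1, y2, y3], dim=1)

print(torch.equal(spp, sppf))  # True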