# yolov1.py
  1. import torch
  2. import torch.nn as nn
  3. import numpy as np
  4. from utils.nms import multiclass_nms
  5. from .yolov1_basic import Conv
  6. from .yolov1_neck import SPP
  7. from .yolov1_backbone import build_resnet
  8. # YOLOv1
  9. class YOLOv1(nn.Module):
  10. def __init__(self,
  11. device,
  12. img_size=None,
  13. num_classes=20,
  14. conf_thresh=0.01,
  15. nms_thresh=0.5,
  16. trainable=False):
  17. super(YOLOv1, self).__init__()
  18. # ------------------- Basic parameters -------------------
  19. self.img_size = img_size # 输入图像大小
  20. self.device = device # cuda或者是cpu
  21. self.num_classes = num_classes # 类别的数量
  22. self.trainable = trainable # 训练的标记
  23. self.conf_thresh = conf_thresh # 得分阈值
  24. self.nms_thresh = nms_thresh # NMS阈值
  25. self.stride = 32 # 网络的最大步长
  26. # ------------------- Network Structure -------------------
  27. ## backbone: resnet18
  28. self.backbone, feat_dim = build_resnet('resnet18', pretrained=trainable)
  29. ## neck: SPP
  30. self.neck = nn.Sequential(
  31. SPP(),
  32. Conv(feat_dim*4, feat_dim, k=1),
  33. )
  34. ## head
  35. self.convsets = nn.Sequential(
  36. Conv(feat_dim, feat_dim//2, k=1),
  37. Conv(feat_dim//2, feat_dim, k=3, p=1),
  38. Conv(feat_dim, feat_dim//2, k=1),
  39. Conv(feat_dim//2, feat_dim, k=3, p=1)
  40. )
  41. ## pred
  42. self.pred = nn.Conv2d(feat_dim, 1 + self.num_classes + 4, 1)
  43. if self.trainable:
  44. self.init_bias()
  45. def init_bias(self):
  46. # init bias
  47. init_prob = 0.01
  48. bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
  49. nn.init.constant_(self.pred.bias[..., :1], bias_value)
  50. nn.init.constant_(self.pred.bias[..., 1:1+self.num_classes], bias_value)
  51. def create_grid(self, fmp_size):
  52. """
  53. 用于生成G矩阵,其中每个元素都是特征图上的像素坐标。
  54. """
  55. # 特征图的宽和高
  56. ws, hs = fmp_size
  57. # 生成网格的x坐标和y坐标
  58. grid_y, grid_x = torch.meshgrid([torch.arange(hs), torch.arange(ws)])
  59. # 将xy两部分的坐标拼起来:[H, W, 2]
  60. grid_xy = torch.stack([grid_x, grid_y], dim=-1).float()
  61. # [H, W, 2] -> [HW, 2] -> [HW, 2]
  62. grid_xy = grid_xy.view(-1, 2).to(self.device)
  63. return grid_xy
  64. def decode_boxes(self, pred, fmp_size):
  65. """
  66. 将txtytwth转换为常用的x1y1x2y2形式。
  67. """
  68. # 生成网格坐标矩阵
  69. grid_cell = self.create_grid(fmp_size)
  70. # 计算预测边界框的中心点坐标和宽高
  71. pred[..., :2] = (torch.sigmoid(pred[..., :2]) + grid_cell) * self.stride
  72. pred[..., 2:] = torch.exp(pred[..., 2:])
  73. # 将所有bbox的中心带你坐标和宽高换算成x1y1x2y2形式
  74. output = torch.zeros_like(pred)
  75. output[..., :2] = pred[..., :2] - pred[..., 2:] * 0.5
  76. output[..., 2:] = pred[..., :2] + pred[..., 2:] * 0.5
  77. return output
  78. def postprocess(self, bboxes, scores):
  79. """
  80. Input:
  81. bboxes: [HxW, 4]
  82. scores: [HxW, num_classes]
  83. Output:
  84. bboxes: [N, 4]
  85. score: [N,]
  86. labels: [N,]
  87. """
  88. labels = np.argmax(scores, axis=1)
  89. scores = scores[(np.arange(scores.shape[0]), labels)]
  90. # threshold
  91. keep = np.where(scores >= self.conf_thresh)
  92. bboxes = bboxes[keep]
  93. scores = scores[keep]
  94. labels = labels[keep]
  95. # nms
  96. scores, labels, bboxes = multiclass_nms(
  97. scores, labels, bboxes, self.nms_thresh, self.num_classes, False)
  98. return bboxes, scores, labels
  99. @torch.no_grad()
  100. def inference(self, x):
  101. # backbone主干网络
  102. feat = self.backbone(x)
  103. # neck网络
  104. feat = self.neck(feat)
  105. # detection head网络
  106. feat = self.convsets(feat)
  107. # 预测层
  108. pred = self.pred(feat)
  109. fmp_size = pred.shape[-2:]
  110. # 对pred 的size做一些view调整,便于后续的处理
  111. # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]
  112. pred = pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
  113. # 从pred中分离出objectness预测、类别class预测、bbox的txtytwth预测
  114. # [B, H*W, 1]
  115. obj_pred = pred[..., :1]
  116. # [B, H*W, num_cls]
  117. cls_pred = pred[..., 1:1+self.num_classes]
  118. # [B, H*W, 4]
  119. reg_pred = pred[..., 1+self.num_classes:]
  120. # 测试时,笔者默认batch是1,
  121. # 因此,我们不需要用batch这个维度,用[0]将其取走。
  122. obj_pred = obj_pred[0] # [H*W, 1]
  123. cls_pred = cls_pred[0] # [H*W, NC]
  124. reg_pred = reg_pred[0] # [H*W, 4]
  125. # 每个边界框的得分
  126. scores = torch.sqrt(obj_pred.sigmoid() * cls_pred.sigmoid())
  127. # 解算边界框, 并归一化边界框: [H*W, 4]
  128. bboxes = self.decode_boxes(reg_pred, fmp_size)
  129. # 将预测放在cpu处理上,以便进行后处理
  130. scores = scores.cpu().numpy()
  131. bboxes = bboxes.cpu().numpy()
  132. # 后处理
  133. bboxes, scores, labels = self.postprocess(bboxes, scores)
  134. return bboxes, scores, labels
  135. def forward(self, x):
  136. if not self.trainable:
  137. return self.inference(x)
  138. else:
  139. # backbone主干网络
  140. feat = self.backbone(x)
  141. # neck网络
  142. feat = self.neck(feat)
  143. # detection head网络
  144. feat = self.convsets(feat)
  145. # 预测层
  146. pred = self.pred(feat)
  147. fmp_size = pred.shape[-2:]
  148. # 对pred 的size做一些view调整,便于后续的处理
  149. # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]
  150. pred = pred.permute(0, 2, 3, 1).contiguous().flatten(1, 2)
  151. # 从pred中分离出objectness预测、类别class预测、bbox的txtytwth预测
  152. # [B, H*W, 1]
  153. obj_pred = pred[..., :1]
  154. # [B, H*W, num_cls]
  155. cls_pred = pred[..., 1:1+self.num_classes]
  156. # [B, H*W, 4]
  157. reg_pred = pred[..., 1+self.num_classes:]
  158. # decode bbox
  159. box_pred = self.decode_boxes(reg_pred, fmp_size)
  160. # 网络输出
  161. outputs = {"pred_obj": obj_pred, # (Tensor) [B, M, 1]
  162. "pred_cls": cls_pred, # (Tensor) [B, M, C]
  163. "pred_box": box_pred, # (Tensor) [B, M, 4]
  164. "stride": self.stride, # (Int)
  165. "fmp_size": fmp_size # (List) [fmp_h, fmp_w]
  166. }
  167. return outputs