# yolov8_head.py

import torch
import torch.nn as nn

from .yolov8_basic import BasicConv


# -------------------- Detection Head --------------------
## Single-level Detection Head
class DetHead(nn.Module):
    def __init__(self,
                 in_dim       :int  = 256,
                 cls_head_dim :int  = 256,
                 reg_head_dim :int  = 256,
                 num_cls_head :int  = 2,
                 num_reg_head :int  = 2,
                 act_type     :str  = "silu",
                 norm_type    :str  = "BN",
                 depthwise    :bool = False):
        super().__init__()
        # --------- Basic Parameters ----------
        self.in_dim = in_dim
        self.num_cls_head = num_cls_head
        self.num_reg_head = num_reg_head
        self.act_type = act_type
        self.norm_type = norm_type
        self.depthwise = depthwise

        # --------- Network Parameters ----------
        ## cls head
        cls_feats = []
        self.cls_head_dim = cls_head_dim
        for i in range(num_cls_head):
            if i == 0:
                cls_feats.append(
                    BasicConv(in_dim, self.cls_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )
            else:
                cls_feats.append(
                    BasicConv(self.cls_head_dim, self.cls_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )

        ## reg head
        reg_feats = []
        self.reg_head_dim = reg_head_dim
        for i in range(num_reg_head):
            if i == 0:
                reg_feats.append(
                    BasicConv(in_dim, self.reg_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )
            else:
                reg_feats.append(
                    BasicConv(self.reg_head_dim, self.reg_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )

        self.cls_feats = nn.Sequential(*cls_feats)
        self.reg_feats = nn.Sequential(*reg_feats)

        self.init_weights()

    def init_weights(self):
        """Initialize the parameters."""
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                # In order to be consistent with the source code,
                # reset the Conv2d initialization parameters
                m.reset_parameters()

    def forward(self, x):
        """
        x: (Tensor) [B, C, H, W]
        """
        cls_feats = self.cls_feats(x)
        reg_feats = self.reg_feats(x)

        return cls_feats, reg_feats


## Multi-level Detection Head
class Yolov8DetHead(nn.Module):
    def __init__(self, cfg, in_dims):
        super().__init__()
        ## ----------- Network Parameters -----------
        self.multi_level_heads = nn.ModuleList(
            [DetHead(in_dim       = in_dims[level],
                     cls_head_dim = max(in_dims[0], min(cfg.num_classes, 128)),
                     reg_head_dim = max(in_dims[0]//4, 16, 4*cfg.reg_max),
                     num_cls_head = cfg.num_cls_head,
                     num_reg_head = cfg.num_reg_head,
                     act_type     = cfg.head_act,
                     norm_type    = cfg.head_norm,
                     depthwise    = cfg.head_depthwise)
             for level in range(cfg.num_levels)
             ])
        # --------- Basic Parameters ----------
        self.in_dims = in_dims
        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim

    def forward(self, feats):
        """
        feats: List[(Tensor)] [[B, C, H, W], ...]
        """
        cls_feats = []
        reg_feats = []
        for feat, head in zip(feats, self.multi_level_heads):
            # ---------------- Pred ----------------
            cls_feat, reg_feat = head(feat)

            cls_feats.append(cls_feat)
            reg_feats.append(reg_feat)

        return cls_feats, reg_feats
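

# A hedged worked example of the head-width arithmetic above. The numbers are
# illustrative assumptions, not values taken from this repo's config: with
# in_dims = [256, 512, 512], cfg.num_classes = 80 and cfg.reg_max = 16,
#   cls_head_dim = max(256, min(80, 128))    = max(256, 80)     = 256
#   reg_head_dim = max(256 // 4, 16, 4 * 16) = max(64, 16, 64)  = 64
# so all pyramid levels share one cls-branch width (256) and one reg-branch
# width (64), while each level's first conv adapts its own in_dims[level].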


# -------------------- Segmentation Head --------------------
## Single-level Segmentation Head (not complete yet)
class SegHead(nn.Module):
    def __init__(self,
                 in_dim       :int  = 256,
                 cls_head_dim :int  = 256,
                 reg_head_dim :int  = 256,
                 seg_head_dim :int  = 256,
                 num_cls_head :int  = 2,
                 num_reg_head :int  = 2,
                 num_seg_head :int  = 2,
                 act_type     :str  = "silu",
                 norm_type    :str  = "BN",
                 depthwise    :bool = False):
        super().__init__()
        # --------- Basic Parameters ----------
        self.in_dim = in_dim
        self.num_cls_head = num_cls_head
        self.num_reg_head = num_reg_head
        self.num_seg_head = num_seg_head
        self.act_type = act_type
        self.norm_type = norm_type
        self.depthwise = depthwise

        # --------- Network Parameters ----------
        ## cls head
        cls_feats = []
        self.cls_head_dim = cls_head_dim
        for i in range(num_cls_head):
            if i == 0:
                cls_feats.append(
                    BasicConv(in_dim, self.cls_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )
            else:
                cls_feats.append(
                    BasicConv(self.cls_head_dim, self.cls_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )

        ## reg head
        reg_feats = []
        self.reg_head_dim = reg_head_dim
        for i in range(num_reg_head):
            if i == 0:
                reg_feats.append(
                    BasicConv(in_dim, self.reg_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )
            else:
                reg_feats.append(
                    BasicConv(self.reg_head_dim, self.reg_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )

        ## seg head
        seg_feats = []
        self.seg_head_dim = seg_head_dim
        for i in range(num_seg_head):
            if i == 0:
                seg_feats.append(
                    BasicConv(in_dim, self.seg_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )
            else:
                seg_feats.append(
                    BasicConv(self.seg_head_dim, self.seg_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )

        self.cls_feats = nn.Sequential(*cls_feats)
        self.reg_feats = nn.Sequential(*reg_feats)
        self.seg_feats = nn.Sequential(*seg_feats)

        self.init_weights()

    def init_weights(self):
        """Initialize the parameters."""
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                # In order to be consistent with the source code,
                # reset the Conv2d initialization parameters
                m.reset_parameters()

    def forward(self, x):
        """
        x: (Tensor) [B, C, H, W]
        """
        cls_feats = self.cls_feats(x)
        reg_feats = self.reg_feats(x)
        seg_feats = self.seg_feats(x)

        return cls_feats, reg_feats, seg_feats


## Multi-level Segmentation Head (not complete yet)
class YoloSegHead(nn.Module):
    def __init__(self, cfg, in_dims):
        super().__init__()
        ## ----------- Network Parameters -----------
        self.multi_level_heads = nn.ModuleList(
            [SegHead(in_dim       = in_dims[level],
                     cls_head_dim = max(in_dims[0], min(cfg.num_classes, 100)),
                     reg_head_dim = max(in_dims[0]//4, 16, 4*cfg.reg_max),
                     seg_head_dim = in_dims[0],
                     num_cls_head = cfg.num_cls_head,
                     num_reg_head = cfg.num_reg_head,
                     num_seg_head = cfg.num_seg_head,
                     act_type     = cfg.head_act,
                     norm_type    = cfg.head_norm,
                     depthwise    = cfg.head_depthwise)
             for level in range(cfg.num_levels)
             ])
        # --------- Basic Parameters ----------
        self.in_dims = in_dims
        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
        self.seg_head_dim = self.multi_level_heads[0].seg_head_dim

    def forward(self, feats):
        """
        feats: List[(Tensor)] [[B, C, H, W], ...]
        """
        cls_feats = []
        reg_feats = []
        seg_feats = []
        for feat, head in zip(feats, self.multi_level_heads):
            # ---------------- Pred ----------------
            cls_feat, reg_feat, seg_feat = head(feat)

            cls_feats.append(cls_feat)
            reg_feats.append(reg_feat)
            seg_feats.append(seg_feat)

        return cls_feats, reg_feats, seg_feats
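

if __name__ == "__main__":
    # A minimal smoke-test sketch, not part of the original file. It assumes a
    # config object exposing the attributes read above; the concrete values and
    # the 3-level [256, 512, 512] feature widths are illustrative only. Because
    # of the relative import of BasicConv, run it as a module from the package
    # root (e.g. `python -m <package>.yolov8_head`, with the path adjusted to
    # wherever this file lives in the repo).
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        num_classes    = 80,     # assumed COCO-style class count
        reg_max        = 16,     # assumed DFL bin count
        num_cls_head   = 2,
        num_reg_head   = 2,
        num_seg_head   = 2,
        head_act       = "silu",
        head_norm      = "BN",
        head_depthwise = False,
        num_levels     = 3,
    )
    in_dims = [256, 512, 512]

    # Dummy multi-level features, e.g. strides 8 / 16 / 32 on a 256x256 input.
    feats = [torch.randn(2, in_dims[level],
                         256 // (8 * 2 ** level), 256 // (8 * 2 ** level))
             for level in range(cfg.num_levels)]

    det_head = Yolov8DetHead(cfg, in_dims)
    cls_feats, reg_feats = det_head(feats)
    for cls_feat, reg_feat in zip(cls_feats, reg_feats):
        # Expect [B, cls_head_dim, H, W] and [B, reg_head_dim, H, W] per level.
        print(cls_feat.shape, reg_feat.shape)

    seg_head = YoloSegHead(cfg, in_dims)
    cls_feats, reg_feats, seg_feats = seg_head(feats)
    for cls_feat, reg_feat, seg_feat in zip(cls_feats, reg_feats, seg_feats):
        # The seg branch adds a [B, seg_head_dim, H, W] feature per level.
        print(cls_feat.shape, reg_feat.shape, seg_feat.shape)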