# rtcdet_v2_basic.py

from typing import List

import torch
import torch.nn as nn
# ---------------------------- Base Conv Module ----------------------------
class SiLU(nn.Module):
    """export-friendly version of nn.SiLU()"""

    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)
def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
    return nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
def get_activation(act_type=None):
    if act_type == 'relu':
        return nn.ReLU(inplace=True)
    elif act_type == 'lrelu':
        return nn.LeakyReLU(0.1, inplace=True)
    elif act_type == 'mish':
        return nn.Mish(inplace=True)
    elif act_type == 'silu':
        return nn.SiLU(inplace=True)
    elif act_type is None:
        return nn.Identity()
    else:
        raise NotImplementedError('unknown activation type: {}'.format(act_type))
def get_norm(norm_type, dim):
    if norm_type == 'BN':
        return nn.BatchNorm2d(dim)
    elif norm_type == 'GN':
        return nn.GroupNorm(num_groups=32, num_channels=dim)
    else:
        raise NotImplementedError('unknown normalization type: {}'.format(norm_type))
## Basic Conv Module
class Conv(nn.Module):
    def __init__(self,
                 c1,                # in channels
                 c2,                # out channels
                 k=1,               # kernel size
                 p=0,               # padding
                 s=1,               # stride
                 d=1,               # dilation
                 act_type='lrelu',  # activation
                 norm_type='BN',    # normalization
                 depthwise=False):
        super(Conv, self).__init__()
        convs = []
        add_bias = False if norm_type else True
        # for the 3x3 convs used here, padding = d keeps the spatial size under dilation
        p = p if d == 1 else d
        if depthwise:
            # depthwise conv
            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1, bias=add_bias))
            if norm_type is not None:
                convs.append(get_norm(norm_type, c1))
            if act_type is not None:
                convs.append(get_activation(act_type))
            # pointwise conv
            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1, bias=add_bias))
            if norm_type is not None:
                convs.append(get_norm(norm_type, c2))
            if act_type is not None:
                convs.append(get_activation(act_type))
        else:
            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1, bias=add_bias))
            if norm_type is not None:
                convs.append(get_norm(norm_type, c2))
            if act_type is not None:
                convs.append(get_activation(act_type))

        self.convs = nn.Sequential(*convs)

    def forward(self, x):
        return self.convs(x)
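# Usage sketch (illustrative values): a 3x3, stride-2 Conv halves the spatial
# resolution, e.g. Conv(64, 128, k=3, p=1, s=2) maps [B, 64, H, W] to
# [B, 128, H/2, W/2]; with depthwise=True the same call builds a depthwise
# 3x3 conv followed by a pointwise 1x1 conv.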
## Partial Conv Module
class PartialConv(nn.Module):
    def __init__(self, in_dim, out_dim, split_ratio=0.25, kernel_size=1, stride=1, act_type=None, norm_type=None):
        super().__init__()
        # ----------- Basic Parameters -----------
        assert in_dim == out_dim
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.split_ratio = split_ratio
        self.split_dim = round(in_dim * split_ratio)
        self.untouched_dim = in_dim - self.split_dim
        self.kernel_size = kernel_size
        self.padding = kernel_size // 2
        self.stride = stride
        self.act_type = act_type
        self.norm_type = norm_type
        # ----------- Network Parameters -----------
        self.partial_conv = Conv(self.split_dim, self.split_dim, self.kernel_size, self.padding, self.stride,
                                 act_type=act_type, norm_type=norm_type)

    def forward(self, x):
        # convolve the first split_dim channels, pass the rest through untouched
        x1, x2 = torch.split(x, [self.split_dim, self.untouched_dim], dim=1)
        x1 = self.partial_conv(x1)
        x = torch.cat((x1, x2), dim=1)
        return x
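# Usage sketch (illustrative values): PartialConv(64, 64, split_ratio=0.25)
# convolves only the first round(64 * 0.25) = 16 channels and forwards the
# remaining 48 unchanged, so the output keeps the full 64 channels.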
## Channel Shuffle
class ChannelShuffle(nn.Module):
    def __init__(self, groups=1) -> None:
        super().__init__()
        self.groups = groups

    def forward(self, x):
        # type: (torch.Tensor) -> torch.Tensor
        batchsize, num_channels, height, width = x.size()
        channels_per_group = num_channels // self.groups
        # reshape: [B, C, H, W] -> [B, G, C // G, H, W]
        x = x.view(batchsize, self.groups, channels_per_group, height, width)
        x = torch.transpose(x, 1, 2).contiguous()
        # flatten back to [B, C, H, W]
        x = x.view(batchsize, -1, height, width)
        return x
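# Usage sketch (illustrative values): with groups=2 and 4 channels [a, b, c, d],
# ChannelShuffle reorders them to [a, c, b, d], mixing information across the
# two groups without any learned parameters.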
## Inverse BottleNeck
class InverseBottleneck(nn.Module):
    def __init__(self,
                 in_dim,
                 out_dim,
                 expand_ratio=2.0,
                 shortcut=False,
                 act_type='silu',
                 norm_type='BN',
                 depthwise=False):
        super(InverseBottleneck, self).__init__()
        # ----------- Basic Parameters -----------
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.expand_dim = int(in_dim * expand_ratio)
        # ----------- Network Parameters -----------
        self.cv1 = Conv(in_dim, in_dim, k=3, p=1, act_type=None, norm_type=norm_type, depthwise=depthwise)
        self.cv2 = Conv(in_dim, self.expand_dim, k=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        self.cv3 = Conv(self.expand_dim, out_dim, k=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        self.shortcut = shortcut and in_dim == out_dim

    def forward(self, x):
        h = self.cv3(self.cv2(self.cv1(x)))
        return x + h if self.shortcut else h
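# Usage sketch (illustrative values): InverseBottleneck(64, 64, expand_ratio=2.0,
# shortcut=True) applies a 3x3 conv at width 64, expands to int(64 * 2.0) = 128
# with a 1x1 conv, projects back to 64, and adds the residual; the residual is
# only enabled when in_dim == out_dim.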
## YOLO-style BottleNeck
class YoloBottleneck(nn.Module):
    def __init__(self,
                 in_dim,
                 out_dim,
                 kernel_sizes: List[int] = [3, 3],
                 expand_ratio: float = 0.5,
                 shortcut: bool = False,
                 act_type: str = 'silu',
                 norm_type: str = 'BN',
                 depthwise: bool = False):
        super(YoloBottleneck, self).__init__()
        # ------------------ Basic parameters ------------------
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.inter_dim = int(out_dim * expand_ratio)
        self.shortcut = shortcut and in_dim == out_dim
        # ------------------ Network parameters ------------------
        self.cv1 = Conv(in_dim, self.inter_dim, k=kernel_sizes[0], p=kernel_sizes[0] // 2, norm_type=norm_type, act_type=act_type, depthwise=depthwise)
        self.cv2 = Conv(self.inter_dim, out_dim, k=kernel_sizes[1], p=kernel_sizes[1] // 2, norm_type=norm_type, act_type=act_type, depthwise=depthwise)

    def forward(self, x):
        h = self.cv2(self.cv1(x))
        return x + h if self.shortcut else h
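# Usage sketch (illustrative values): YoloBottleneck(128, 128, shortcut=True)
# builds a 3x3 -> 3x3 block with a hidden width of int(128 * 0.5) = 64 and a
# residual add, which, as above, is only enabled when in_dim == out_dim.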
# ---------------------------- Base Modules ----------------------------
## ELAN Block
class ELANBlock(nn.Module):
    def __init__(self, in_dim, out_dim, expand_ratio: float = 0.5, branch_depth: int = 1, shortcut=False, act_type='silu', norm_type='BN', depthwise=False):
        super().__init__()
        # ----------- Basic Parameters -----------
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.inter_dim = round(in_dim * expand_ratio)
        self.expand_ratio = expand_ratio
        self.branch_depth = branch_depth
        self.shortcut = shortcut
        # ----------- Network Parameters -----------
        self.cv1 = Conv(in_dim, self.inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.cv2 = Conv(in_dim, self.inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        self.cv3 = nn.Sequential(*[
            Conv(self.inter_dim, self.inter_dim, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
            for _ in range(branch_depth)
        ])
        self.cv4 = nn.Sequential(*[
            Conv(self.inter_dim, self.inter_dim, k=3, p=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
            for _ in range(branch_depth)
        ])
        self.out = Conv(self.inter_dim * 4, out_dim, k=1, act_type=act_type, norm_type=norm_type)

    def forward(self, x):
        x1 = self.cv1(x)
        x2 = self.cv2(x)
        x3 = self.cv3(x2) + x2 if self.shortcut else self.cv3(x2)
        x4 = self.cv4(x3) + x3 if self.shortcut else self.cv4(x3)
        # concat the four branches: [B, C, H, W] -> [B, 4 * inter_dim, H, W],
        # then fuse back to out_dim with the final 1x1 conv
        out = self.out(torch.cat([x1, x2, x3, x4], dim=1))
        return out
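# Usage sketch (illustrative values): ELANBlock(256, 256, expand_ratio=0.5)
# runs four branches of width round(256 * 0.5) = 128, concatenates them into
# 512 channels, and fuses back to out_dim with the final 1x1 conv.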
## DownSample Block
class DSBlock(nn.Module):
    def __init__(self, in_dim, out_dim, act_type='silu', norm_type='BN', depthwise=False):
        super().__init__()
        inter_dim = out_dim // 2
        self.branch_1 = nn.Sequential(
            nn.MaxPool2d((2, 2), 2),
            Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
        )
        self.branch_2 = nn.Sequential(
            Conv(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type),
            Conv(inter_dim, inter_dim, k=3, p=1, s=2, act_type=act_type, norm_type=norm_type, depthwise=depthwise)
        )

    def forward(self, x):
        x1 = self.branch_1(x)
        x2 = self.branch_2(x)
        out = torch.cat([x1, x2], dim=1)
        return out
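# Usage sketch (illustrative values): DSBlock(256, 512) downsamples by 2x via
# two parallel paths (maxpool + 1x1 conv, and 1x1 conv + stride-2 3x3 conv),
# each producing out_dim // 2 = 256 channels before the concat.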
# ---------------------------- FPN Modules ----------------------------
## build fpn's core block
def build_fpn_block(cfg, in_dim, out_dim):
    if cfg['fpn_core_block'] == 'elan_block':
        layer = ELANBlock(in_dim=in_dim,
                          out_dim=out_dim,
                          expand_ratio=cfg['fpn_expand_ratio'],
                          branch_depth=round(3 * cfg['depth']),
                          shortcut=False,
                          act_type=cfg['fpn_act'],
                          norm_type=cfg['fpn_norm'],
                          depthwise=cfg['fpn_depthwise']
                          )
    else:
        raise NotImplementedError('unknown fpn core block: {}'.format(cfg['fpn_core_block']))

    return layer


## build fpn's reduce layer
def build_reduce_layer(cfg, in_dim, out_dim):
    if cfg['fpn_reduce_layer'] == 'conv':
        layer = Conv(in_dim, out_dim, k=1, act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'])
    else:
        raise NotImplementedError('unknown fpn reduce layer: {}'.format(cfg['fpn_reduce_layer']))

    return layer


## build fpn's downsample layer
def build_downsample_layer(cfg, in_dim, out_dim):
    if cfg['fpn_downsample_layer'] == 'conv':
        layer = Conv(in_dim, out_dim, k=3, s=2, p=1,
                     act_type=cfg['fpn_act'], norm_type=cfg['fpn_norm'], depthwise=cfg['fpn_depthwise'])
    elif cfg['fpn_downsample_layer'] == 'maxpool':
        assert in_dim == out_dim
        layer = nn.MaxPool2d((2, 2), stride=2)
    else:
        raise NotImplementedError('unknown fpn downsample layer: {}'.format(cfg['fpn_downsample_layer']))

    return layer
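

# Minimal smoke test: quick shape checks for the modules above. The cfg dict
# below is an illustrative sketch; its keys mirror the ones read by the
# builder functions, but the values are placeholders rather than a real
# model configuration.
if __name__ == '__main__':
    x = torch.randn(2, 64, 32, 32)
    print(Conv(64, 128, k=3, p=1, s=2)(x).shape)               # [2, 128, 16, 16]
    print(PartialConv(64, 64)(x).shape)                        # [2, 64, 32, 32]
    print(ChannelShuffle(groups=4)(x).shape)                   # [2, 64, 32, 32]
    print(InverseBottleneck(64, 64, shortcut=True)(x).shape)   # [2, 64, 32, 32]
    print(YoloBottleneck(64, 64, shortcut=True)(x).shape)      # [2, 64, 32, 32]
    print(ELANBlock(64, 128)(x).shape)                         # [2, 128, 32, 32]
    print(DSBlock(64, 128)(x).shape)                           # [2, 128, 16, 16]

    cfg = {'fpn_core_block': 'elan_block', 'fpn_expand_ratio': 0.5,
           'depth': 1.0, 'fpn_act': 'silu', 'fpn_norm': 'BN',
           'fpn_depthwise': False, 'fpn_reduce_layer': 'conv',
           'fpn_downsample_layer': 'maxpool'}
    print(build_fpn_block(cfg, 64, 128)(x).shape)              # [2, 128, 32, 32]
    print(build_reduce_layer(cfg, 64, 32)(x).shape)            # [2, 32, 32, 32]
    print(build_downsample_layer(cfg, 64, 64)(x).shape)        # [2, 64, 16, 16]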