rtcdet_head.py

import torch
import torch.nn as nn

try:
    from .rtcdet_basic import Conv
except ImportError:
    from rtcdet_basic import Conv


def build_head(cfg, in_dims, out_dim, num_levels=3):
    head = MDetHead(cfg, in_dims, out_dim, num_levels)

    return head

# ---------------------------- Detection Head ----------------------------
## Single-level Detection Head
class SDetHead(nn.Module):
    def __init__(self,
                 in_dim       :int  = 256,
                 cls_head_dim :int  = 256,
                 reg_head_dim :int  = 256,
                 num_cls_head :int  = 2,
                 num_reg_head :int  = 2,
                 act_type     :str  = "silu",
                 norm_type    :str  = "BN",
                 depthwise    :bool = False):
        super().__init__()
        # --------- Basic Parameters ----------
        self.in_dim = in_dim
        self.num_cls_head = num_cls_head
        self.num_reg_head = num_reg_head
        self.act_type = act_type
        self.norm_type = norm_type
        self.depthwise = depthwise

        # --------- Network Parameters ----------
        ## cls head
        cls_feats = []
        self.cls_head_dim = cls_head_dim
        for i in range(num_cls_head):
            if i == 0:
                cls_feats.append(
                    Conv(in_dim, self.cls_head_dim, k=3, p=1, s=1,
                         act_type=act_type,
                         norm_type=norm_type,
                         depthwise=depthwise)
                    )
            else:
                cls_feats.append(
                    Conv(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1,
                         act_type=act_type,
                         norm_type=norm_type,
                         depthwise=depthwise)
                    )
        ## reg head
        reg_feats = []
        self.reg_head_dim = reg_head_dim
        for i in range(num_reg_head):
            if i == 0:
                reg_feats.append(
                    Conv(in_dim, self.reg_head_dim, k=3, p=1, s=1,
                         act_type=act_type,
                         norm_type=norm_type,
                         depthwise=depthwise)
                    )
            else:
                reg_feats.append(
                    Conv(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1,
                         act_type=act_type,
                         norm_type=norm_type,
                         depthwise=depthwise)
                    )

        self.cls_feats = nn.Sequential(*cls_feats)
        self.reg_feats = nn.Sequential(*reg_feats)

        self.init_weights()

    def init_weights(self):
        """Initialize the parameters."""
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                # In order to be consistent with the source code,
                # reset the Conv2d initialization parameters
                m.reset_parameters()

    def forward(self, x):
        """
        x: (Tensor) [B, C, H, W]
        """
        cls_feats = self.cls_feats(x)
        reg_feats = self.reg_feats(x)

        return cls_feats, reg_feats

## Multi-level Detection Head
class MDetHead(nn.Module):
    def __init__(self, cfg, in_dims, out_dim, num_levels=3):
        super().__init__()
        ## ----------- Network Parameters -----------
        self.multi_level_heads = nn.ModuleList(
            [SDetHead(in_dim       = in_dims[level],
                      cls_head_dim = out_dim,
                      reg_head_dim = out_dim,
                      num_cls_head = cfg['num_cls_head'],
                      num_reg_head = cfg['num_reg_head'],
                      act_type     = cfg['head_act'],
                      norm_type    = cfg['head_norm'],
                      depthwise    = cfg['head_depthwise'])
             for level in range(num_levels)
             ])
        # --------- Basic Parameters ----------
        self.in_dims = in_dims
        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim

    def forward(self, feats):
        """
        feats: List[(Tensor)] [[B, C, H, W], ...]
        """
        cls_feats = []
        reg_feats = []
        for feat, head in zip(feats, self.multi_level_heads):
            # ---------------- Pred ----------------
            cls_feat, reg_feat = head(feat)

            cls_feats.append(cls_feat)
            reg_feats.append(reg_feat)

        outputs = {
            "cls_feat": cls_feats,
            "reg_feat": reg_feats
        }

        return outputs

if __name__ == '__main__':
    import time
    from thop import profile

    cfg = {
        'head': 'decoupled_head',
        'num_cls_head': 2,
        'num_reg_head': 2,
        'head_act': 'silu',
        'head_norm': 'BN',
        'head_depthwise': False,
        'reg_max': 16,
    }
    fpn_dims = [256, 256, 256]
    out_dim = 256

    # Head-1
    model = build_head(cfg, fpn_dims, out_dim, num_levels=3)
    print(model)

    fpn_feats = [torch.randn(1, fpn_dims[0], 80, 80),
                 torch.randn(1, fpn_dims[1], 40, 40),
                 torch.randn(1, fpn_dims[2], 20, 20)]
    t0 = time.time()
    outputs = model(fpn_feats)
    t1 = time.time()
    print('Time: ', t1 - t0)
    # for out in outputs:
    #     print(out.shape)
    print('==============================')
    flops, params = profile(model, inputs=(fpn_feats, ), verbose=False)
    print('==============================')
    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
    print('Head-1: Params : {:.2f} M'.format(params / 1e6))
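
    # ------------------------------------------------------------------
    # Illustrative sketch only (not part of the original code): one way
    # the decoupled features above might be consumed, using plain 1x1
    # prediction convs per level. The class count of 80 is an assumption
    # (e.g. COCO); the regression branch uses 4 * reg_max channels, with
    # reg_max taken from the cfg above.
    num_classes = 80  # assumed class count
    cls_pred = nn.Conv2d(out_dim, num_classes, kernel_size=1)
    reg_pred = nn.Conv2d(out_dim, 4 * cfg['reg_max'], kernel_size=1)
    for level, (cls_feat, reg_feat) in enumerate(zip(outputs['cls_feat'], outputs['reg_feat'])):
        print('Level-{}: cls logits {} | box regression {}'.format(
            level, tuple(cls_pred(cls_feat).shape), tuple(reg_pred(reg_feat).shape)))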