yolox2_head.py

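"""
Decoupled detection head for YOLOX2: SingleLevelHead builds separate
classification and regression convolution branches for one feature level,
and MultiLevelHead stacks one such head per FPN level.
"""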
import torch
import torch.nn as nn

try:
    from .yolox2_basic import Conv
except ImportError:
    from yolox2_basic import Conv
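
# NOTE: `Conv` from yolox2_basic is used below as a basic convolution block.
# Judging from the calls in this file, its signature is
# Conv(in_ch, out_ch, k, p, s, act_type, norm_type, depthwise) -- presumably a
# Conv2d -> norm -> activation stack, with a depthwise-separable variant when
# depthwise=True.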

# Single-level Head
class SingleLevelHead(nn.Module):
    def __init__(self,
                 in_dim       :int  = 256,
                 cls_head_dim :int  = 256,
                 reg_head_dim :int  = 256,
                 num_cls_head :int  = 2,
                 num_reg_head :int  = 2,
                 act_type     :str  = "silu",
                 norm_type    :str  = "BN",
                 depthwise    :bool = False):
        super().__init__()
        # --------- Basic Parameters ----------
        self.in_dim = in_dim
        self.num_cls_head = num_cls_head
        self.num_reg_head = num_reg_head
        self.act_type = act_type
        self.norm_type = norm_type
        self.depthwise = depthwise

        # --------- Network Parameters ----------
        ## cls head
        cls_feats = []
        self.cls_head_dim = cls_head_dim
        for i in range(num_cls_head):
            cls_feats.append(
                Conv(in_dim if i == 0 else self.cls_head_dim,
                     self.cls_head_dim, k=3, p=1, s=1,
                     act_type=act_type,
                     norm_type=norm_type,
                     depthwise=depthwise)
                )

        ## reg head
        reg_feats = []
        self.reg_head_dim = reg_head_dim
        for i in range(num_reg_head):
            reg_feats.append(
                Conv(in_dim if i == 0 else self.reg_head_dim,
                     self.reg_head_dim, k=3, p=1, s=1,
                     act_type=act_type,
                     norm_type=norm_type,
                     depthwise=depthwise)
                )

        self.cls_feats = nn.Sequential(*cls_feats)
        self.reg_feats = nn.Sequential(*reg_feats)

        self.init_weights()

    def init_weights(self):
        """Initialize the parameters."""
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                # In order to be consistent with the source code,
                # reset the Conv2d initialization parameters
                m.reset_parameters()

    def forward(self, x):
        """
        x: (Tensor) [B, C, H, W]
        """
        cls_feats = self.cls_feats(x)
        reg_feats = self.reg_feats(x)

        return cls_feats, reg_feats

# Multi-level Head
class MultiLevelHead(nn.Module):
    def __init__(self, cfg, in_dims, out_dim, num_levels=3):
        super().__init__()
        ## ----------- Network Parameters -----------
        self.multi_level_heads = nn.ModuleList(
            [SingleLevelHead(in_dim       = in_dims[level],
                             cls_head_dim = out_dim,
                             reg_head_dim = out_dim,
                             num_cls_head = cfg['num_cls_head'],
                             num_reg_head = cfg['num_reg_head'],
                             act_type     = cfg['head_act'],
                             norm_type    = cfg['head_norm'],
                             depthwise    = cfg['head_depthwise'])
             for level in range(num_levels)
             ])
        # --------- Basic Parameters ----------
        self.in_dims = in_dims
        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim

    def forward(self, feats):
        """
        feats: (List[Tensor]) [[B, C, H, W], ...]
        """
        cls_feats = []
        reg_feats = []
        for feat, head in zip(feats, self.multi_level_heads):
            # ---------------- Pred ----------------
            cls_feat, reg_feat = head(feat)

            cls_feats.append(cls_feat)
            reg_feats.append(reg_feat)

        return cls_feats, reg_feats

# build detection head
def build_det_head(cfg, in_dims, out_dim, num_levels=3):
    if cfg['head'] == 'decoupled_head':
        head = MultiLevelHead(cfg, in_dims, out_dim, num_levels)
    else:
        # Guard against returning an undefined `head` for unknown configs
        raise NotImplementedError("Unknown head: {}".format(cfg['head']))

    return head

if __name__ == '__main__':
    import time
    from thop import profile

    cfg = {
        'head': 'decoupled_head',
        'num_cls_head': 2,
        'num_reg_head': 2,
        'head_act': 'silu',
        'head_norm': 'BN',
        'head_depthwise': False,
        'reg_max': 16,
    }
    fpn_dims = [256, 256, 256]
    out_dim = 256

    # Head-1
    model = build_det_head(cfg, fpn_dims, out_dim, num_levels=3)
    print(model)
    fpn_feats = [torch.randn(1, fpn_dims[0], 80, 80),
                 torch.randn(1, fpn_dims[1], 40, 40),
                 torch.randn(1, fpn_dims[2], 20, 20)]
    t0 = time.time()
    outputs = model(fpn_feats)
    t1 = time.time()
    print('Time: ', t1 - t0)
    # cls_feats, reg_feats = outputs
    # for cls_feat, reg_feat in zip(cls_feats, reg_feats):
    #     print(cls_feat.shape, reg_feat.shape)
    print('==============================')
    flops, params = profile(model, inputs=(fpn_feats, ), verbose=False)
    print('==============================')
    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
    print('Head-1: Params : {:.2f} M'.format(params / 1e6))