yolov7_af_head.py

import torch
import torch.nn as nn

try:
    from .yolov7_af_basic import BasicConv
except ImportError:
    from yolov7_af_basic import BasicConv

## Single-level Detection Head
class DetHead(nn.Module):
    def __init__(self,
                 in_dim       :int  = 256,
                 cls_head_dim :int  = 256,
                 reg_head_dim :int  = 256,
                 num_cls_head :int  = 2,
                 num_reg_head :int  = 2,
                 act_type     :str  = "silu",
                 norm_type    :str  = "BN",
                 depthwise    :bool = False):
        super().__init__()
        # --------- Basic Parameters ----------
        self.in_dim = in_dim
        self.num_cls_head = num_cls_head
        self.num_reg_head = num_reg_head
        self.act_type = act_type
        self.norm_type = norm_type
        self.depthwise = depthwise

        # --------- Network Parameters ----------
        ## cls head
        cls_feats = []
        self.cls_head_dim = cls_head_dim
        for i in range(num_cls_head):
            if i == 0:
                cls_feats.append(
                    BasicConv(in_dim, self.cls_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )
            else:
                cls_feats.append(
                    BasicConv(self.cls_head_dim, self.cls_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )

        ## reg head
        reg_feats = []
        self.reg_head_dim = reg_head_dim
        for i in range(num_reg_head):
            if i == 0:
                reg_feats.append(
                    BasicConv(in_dim, self.reg_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )
            else:
                reg_feats.append(
                    BasicConv(self.reg_head_dim, self.reg_head_dim,
                              kernel_size=3, padding=1, stride=1,
                              act_type=act_type,
                              norm_type=norm_type,
                              depthwise=depthwise)
                )

        self.cls_feats = nn.Sequential(*cls_feats)
        self.reg_feats = nn.Sequential(*reg_feats)

        self.init_weights()

    def init_weights(self):
        """Initialize the parameters."""
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                # In order to be consistent with the source code,
                # reset the Conv2d initialization parameters
                m.reset_parameters()

    def forward(self, x):
        """
        x: (Tensor) [B, C, H, W]
        """
        cls_feats = self.cls_feats(x)
        reg_feats = self.reg_feats(x)

        return cls_feats, reg_feats
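
# Note (added for clarity): every layer in DetHead is a 3x3 conv with
# stride 1 and padding 1, so the spatial resolution is preserved; an input
# of shape [B, in_dim, H, W] produces a cls feature of shape
# [B, cls_head_dim, H, W] and a reg feature of shape [B, reg_head_dim, H, W].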

## Multi-level Detection Head
class Yolov7DetHead(nn.Module):
    def __init__(self, cfg, in_dims):
        super().__init__()
        ## ----------- Network Parameters -----------
        self.multi_level_heads = nn.ModuleList(
            [DetHead(in_dim       = in_dims[level],
                     cls_head_dim = round(cfg.head_dim * cfg.width),
                     reg_head_dim = round(cfg.head_dim * cfg.width),
                     num_cls_head = cfg.num_cls_head,
                     num_reg_head = cfg.num_reg_head,
                     act_type     = cfg.head_act,
                     norm_type    = cfg.head_norm,
                     depthwise    = cfg.head_depthwise)
             for level in range(cfg.num_levels)
             ])

        # --------- Basic Parameters ----------
        self.in_dims = in_dims
        # Record the head dims actually built (scaled by cfg.width), so that
        # downstream prediction layers match the output channels.
        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim

    def forward(self, feats):
        """
        feats: List[(Tensor)] [[B, C, H, W], ...]
        """
        cls_feats = []
        reg_feats = []
        for feat, head in zip(feats, self.multi_level_heads):
            # ---------------- Pred ----------------
            cls_feat, reg_feat = head(feat)

            cls_feats.append(cls_feat)
            reg_feats.append(reg_feat)

        return cls_feats, reg_feats

if __name__ == '__main__':
    import time
    from thop import profile

    # Model config
    # YOLOv7-Base config
    class Yolov7BaseConfig(object):
        def __init__(self) -> None:
            # ---------------- Model config ----------------
            self.width = 0.50
            self.out_stride = [8, 16, 32]
            self.max_stride = 32
            self.num_levels = 3
            ## Head
            self.head_act = 'lrelu'
            self.head_norm = 'BN'
            self.head_depthwise = False
            self.head_dim = 256
            self.num_cls_head = 2
            self.num_reg_head = 2

    cfg = Yolov7BaseConfig()

    # Build a head
    pyramid_feats = [torch.randn(1, cfg.head_dim, 80, 80),
                     torch.randn(1, cfg.head_dim, 40, 40),
                     torch.randn(1, cfg.head_dim, 20, 20)]
    head = Yolov7DetHead(cfg, [cfg.head_dim]*3)

    # Inference
    t0 = time.time()
    cls_feats, reg_feats = head(pyramid_feats)
    t1 = time.time()
    print('Time: ', t1 - t0)

    print("====== Yolov7 Head output ======")
    for level, (cls_f, reg_f) in enumerate(zip(cls_feats, reg_feats)):
        print("- Level-{} : ".format(level), cls_f.shape, reg_f.shape)

    flops, params = profile(head, inputs=(pyramid_feats, ), verbose=False)
    print('==============================')
    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
    print('Params : {:.2f} M'.format(params / 1e6))
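
    # Minimal single-level sketch (added illustration, not part of the original
    # test): run one DetHead on a single feature map to check that the spatial
    # size is preserved and that the channel count equals the width-scaled head
    # dim. The 40x40 input size below is an arbitrary assumption.
    single_head = DetHead(in_dim       = cfg.head_dim,
                          cls_head_dim = round(cfg.head_dim * cfg.width),
                          reg_head_dim = round(cfg.head_dim * cfg.width),
                          num_cls_head = cfg.num_cls_head,
                          num_reg_head = cfg.num_reg_head,
                          act_type     = cfg.head_act,
                          norm_type    = cfg.head_norm,
                          depthwise    = cfg.head_depthwise)
    single_feat = torch.randn(1, cfg.head_dim, 40, 40)
    single_cls, single_reg = single_head(single_feat)
    print("====== Single-level DetHead output ======")
    print("- cls :", single_cls.shape)  # expected [1, 128, 40, 40] with width=0.5
    print("- reg :", single_reg.shape)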