# yolovx_head.py
import torch
import torch.nn as nn

from .yolovx_basic import Conv
  5. class SingleLevelHead(nn.Module):
  6. def __init__(self, in_dim, out_dim, num_classes, num_cls_head, num_reg_head, act_type, norm_type, depthwise):
  7. super().__init__()
  8. # --------- Basic Parameters ----------
  9. self.in_dim = in_dim
  10. self.num_classes = num_classes
  11. self.num_cls_head = num_cls_head
  12. self.num_reg_head = num_reg_head
  13. self.act_type = act_type
  14. self.norm_type = norm_type
  15. self.depthwise = depthwise
  16. # --------- Network Parameters ----------
  17. ## cls head
  18. cls_feats = []
  19. self.cls_head_dim = max(out_dim, num_classes)
  20. for i in range(num_cls_head):
  21. if i == 0:
  22. cls_feats.append(
  23. Conv(in_dim, self.cls_head_dim, k=3, p=1, s=1,
  24. act_type=act_type,
  25. norm_type=norm_type,
  26. depthwise=depthwise)
  27. )
  28. else:
  29. cls_feats.append(
  30. Conv(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1,
  31. act_type=act_type,
  32. norm_type=norm_type,
  33. depthwise=depthwise)
  34. )
  35. ## reg head
  36. reg_feats = []
  37. self.reg_head_dim = out_dim
  38. for i in range(num_reg_head):
  39. if i == 0:
  40. reg_feats.append(
  41. Conv(in_dim, self.reg_head_dim, k=3, p=1, s=1,
  42. act_type=act_type,
  43. norm_type=norm_type,
  44. depthwise=depthwise)
  45. )
  46. else:
  47. reg_feats.append(
  48. Conv(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1,
  49. act_type=act_type,
  50. norm_type=norm_type,
  51. depthwise=depthwise)
  52. )
  53. self.cls_feats = nn.Sequential(*cls_feats)
  54. self.reg_feats = nn.Sequential(*reg_feats)
  55. def forward(self, x):
  56. """
  57. in_feats: (Tensor) [B, C, H, W]
  58. """
  59. cls_feats = self.cls_feats(x)
  60. reg_feats = self.reg_feats(x)
  61. return cls_feats, reg_feats
  62. # Multi-level Head
  63. class MultiLevelHead(nn.Module):
  64. def __init__(self, cfg, in_dims, out_dim, num_classes=80, num_levels=3):
  65. super().__init__()
  66. ## ----------- Network Parameters -----------
  67. self.multi_level_heads = nn.ModuleList(
  68. [SingleLevelHead(
  69. in_dims[level],
  70. out_dim,
  71. num_classes,
  72. cfg['num_cls_head'],
  73. cfg['num_reg_head'],
  74. cfg['head_act'],
  75. cfg['head_norm'],
  76. cfg['head_depthwise'])
  77. for level in range(num_levels)
  78. ])
  79. # --------- Basic Parameters ----------
  80. self.in_dims = in_dims
  81. self.num_classes = num_classes
  82. self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
  83. self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
  84. def forward(self, feats):
  85. """
  86. feats: List[(Tensor)] [[B, C, H, W], ...]
  87. """
  88. cls_feats = []
  89. reg_feats = []
  90. for feat, head in zip(feats, self.multi_level_heads):
  91. # ---------------- Pred ----------------
  92. cls_feat, reg_feat = head(feat)
  93. cls_feats.append(cls_feat)
  94. reg_feats.append(reg_feat)
  95. return cls_feats, reg_feats
  96. # build detection head
  97. def build_det_head(cfg, in_dim, out_dim, num_classes=80, num_levels=3):
  98. if cfg['head'] == 'decoupled_head':
  99. head = MultiLevelHead(cfg, in_dim, out_dim, num_classes, num_levels)
  100. return head