# yolo_free_v2_head.py
  1. import torch
  2. import torch.nn as nn
  3. from .yolo_free_v2_basic import Conv
  4. class SingleLevelHead(nn.Module):
  5. def __init__(self, cfg, in_dim, out_dim, num_classes):
  6. super().__init__()
  7. # --------- Basic Parameters ----------
  8. self.in_dim = in_dim
  9. self.num_classes = num_classes
  10. self.num_cls_head = cfg['num_cls_head']
  11. self.num_reg_head = cfg['num_reg_head']
  12. self.act_type = cfg['head_act']
  13. self.norm_type = cfg['head_norm']
  14. self.depthwise = cfg['head_depthwise']
  15. # --------- Network Parameters ----------
  16. ## cls head
  17. cls_feats = []
  18. self.cls_head_dim = max(out_dim, num_classes)
  19. for i in range(self.num_cls_head):
  20. if i == 0:
  21. cls_feats.append(
  22. Conv(in_dim, self.cls_head_dim, k=3, p=1, s=1,
  23. act_type=self.act_type,
  24. norm_type=self.norm_type,
  25. depthwise=self.depthwise)
  26. )
  27. else:
  28. cls_feats.append(
  29. Conv(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1,
  30. act_type=self.act_type,
  31. norm_type=self.norm_type,
  32. depthwise=self.depthwise)
  33. )
  34. ## reg head
  35. reg_feats = []
  36. self.reg_head_dim = max(out_dim, 4*cfg['reg_max'])
  37. for i in range(self.num_reg_head):
  38. if i == 0:
  39. reg_feats.append(
  40. Conv(in_dim, self.reg_head_dim, k=3, p=1, s=1,
  41. act_type=self.act_type,
  42. norm_type=self.norm_type,
  43. depthwise=self.depthwise)
  44. )
  45. else:
  46. reg_feats.append(
  47. Conv(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1,
  48. act_type=self.act_type,
  49. norm_type=self.norm_type,
  50. depthwise=self.depthwise)
  51. )
  52. self.cls_feats = nn.Sequential(*cls_feats)
  53. self.reg_feats = nn.Sequential(*reg_feats)
  54. def forward(self, x):
  55. """
  56. in_feats: (Tensor) [B, C, H, W]
  57. """
  58. cls_feats = self.cls_feats(x)
  59. reg_feats = self.reg_feats(x)
  60. return cls_feats, reg_feats
  61. class MultiLevelHead(nn.Module):
  62. def __init__(self, cfg, in_dims, out_dim, num_classes=80, num_levels=3):
  63. super().__init__()
  64. ## ----------- Network Parameters -----------
  65. self.multi_level_heads = nn.ModuleList(
  66. [SingleLevelHead(
  67. cfg,
  68. in_dims[level],
  69. out_dim,
  70. num_classes)
  71. for level in range(num_levels)
  72. ])
  73. # --------- Basic Parameters ----------
  74. self.in_dims = in_dims
  75. self.num_classes = num_classes
  76. self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
  77. self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
  78. def forward(self, feats):
  79. """
  80. feats: List[(Tensor)] [[B, C, H, W], ...]
  81. """
  82. cls_feats = []
  83. reg_feats = []
  84. for feat, head in zip(feats, self.multi_level_heads):
  85. # ---------------- Pred ----------------
  86. cls_feat, reg_feat = head(feat)
  87. cls_feats.append(cls_feat)
  88. reg_feats.append(reg_feat)
  89. return cls_feats, reg_feats
  90. # build detection head
  91. def build_det_head(cfg, in_dim, out_dim, num_classes=80, num_levels=3):
  92. if cfg['head'] == 'decoupled_head':
  93. head = MultiLevelHead(cfg, in_dim, out_dim, num_classes, num_levels)
  94. return head