# --------------- Torch components ---------------
import torch
import torch.nn as nn

# --------------- Model components ---------------
from .rtcdet_v2_backbone import build_backbone
from .rtcdet_v2_neck import build_neck
from .rtcdet_v2_pafpn import build_fpn
from .rtcdet_v2_head import build_det_head
from .rtcdet_v2_pred import build_pred_layer

# --------------- External components ---------------
from utils.misc import multiclass_nms


# My RTCDet
class RTCDet(nn.Module):
    def __init__(self,
                 cfg,
                 device,
                 num_classes: int = 20,
                 conf_thresh: float = 0.05,
                 nms_thresh: float = 0.6,
                 topk: int = 1000,
                 trainable: bool = False,
                 deploy: bool = False):
        super(RTCDet, self).__init__()
        # ---------------------- Basic Parameters ----------------------
        self.cfg = cfg
        self.device = device
        self.stride = cfg['stride']
        self.num_classes = num_classes
        self.trainable = trainable
        self.conf_thresh = conf_thresh
        self.nms_thresh = nms_thresh
        self.topk = topk
        self.deploy = deploy
        self.head_dim = round(256 * cfg['width'])

        # ---------------------- Network Parameters ----------------------
        ## ----------- Backbone -----------
        self.backbone, feats_dim = build_backbone(cfg, trainable and cfg['pretrained'])

        ## ----------- Neck: SPP -----------
        self.neck = build_neck(cfg, feats_dim[-1], feats_dim[-1])
        feats_dim[-1] = self.neck.out_dim

        ## ----------- Neck: FPN -----------
        self.fpn = build_fpn(cfg, feats_dim, round(256 * cfg['width']))
        self.fpn_dims = self.fpn.out_dim

        ## ----------- Heads -----------
        self.det_heads = build_det_head(
            cfg, self.fpn_dims, self.head_dim, num_classes, num_levels=len(self.stride))

        ## ----------- Preds -----------
        self.pred_layers = build_pred_layer(
            self.det_heads.cls_head_dim, self.det_heads.reg_head_dim,
            self.stride, num_classes, num_coords=4, num_levels=len(self.stride))

    # ---------------------- Post-process ----------------------
    def post_process(self, cls_preds, box_preds):
        """
        Input:
            cls_preds: List[Tensor] -> [[B, H x W, C], ...], with B = 1 at inference
            box_preds: List[Tensor] -> [[B, H x W, 4], ...], with B = 1 at inference
        """
        all_scores = []
        all_labels = []
        all_bboxes = []

        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
            # strip the batch dimension (B = 1)
            cls_pred_i = cls_pred_i[0]
            box_pred_i = box_pred_i[0]

            # (H x W x C,)
            scores_i = cls_pred_i.sigmoid().flatten()

            # Keep top k top scoring indices only.
            num_topk = min(self.topk, box_pred_i.size(0))

            # torch.sort is actually faster than .topk (at least on GPUs)
            predicted_prob, topk_idxs = scores_i.sort(descending=True)
            topk_scores = predicted_prob[:num_topk]
            topk_idxs = topk_idxs[:num_topk]

            # filter out the proposals with low confidence scores
            keep_idxs = topk_scores > self.conf_thresh
            scores = topk_scores[keep_idxs]
            topk_idxs = topk_idxs[keep_idxs]

            anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
            labels = topk_idxs % self.num_classes
            bboxes = box_pred_i[anchor_idxs]

            all_scores.append(scores)
            all_labels.append(labels)
            all_bboxes.append(bboxes)

        scores = torch.cat(all_scores)
        labels = torch.cat(all_labels)
        bboxes = torch.cat(all_bboxes)

        # to cpu & numpy
        scores = scores.cpu().numpy()
        labels = labels.cpu().numpy()
        bboxes = bboxes.cpu().numpy()

        # nms
        scores, labels, bboxes = multiclass_nms(
            scores, labels, bboxes, self.nms_thresh, self.num_classes, False)

        return bboxes, scores, labels

    # ---------------------- Main Process for Inference ----------------------
    @torch.no_grad()
    def inference_single_image(self, x):
        # ---------------- Backbone ----------------
        pyramid_feats = self.backbone(x)

        # ---------------- Neck: SPP ----------------
        pyramid_feats[-1] = self.neck(pyramid_feats[-1])

        # ---------------- Neck: PaFPN ----------------
        pyramid_feats = self.fpn(pyramid_feats)

        # ---------------- Heads ----------------
        cls_feats, reg_feats = self.det_heads(pyramid_feats)

        # ---------------- Preds ----------------
        outputs = self.pred_layers(cls_feats, reg_feats)
        all_cls_preds = outputs['pred_cls']
        all_box_preds = outputs['pred_box']

        if self.deploy:
            cls_preds = torch.cat(all_cls_preds, dim=1)[0]
            box_preds = torch.cat(all_box_preds, dim=1)[0]
            scores = cls_preds.sigmoid()
            bboxes = box_preds
            # [n_anchors_all, 4 + C]
            outputs = torch.cat([bboxes, scores], dim=-1)
            return outputs
        else:
            # post-process
            bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
            return bboxes, scores, labels

    def forward(self, x):
        if not self.trainable:
            return self.inference_single_image(x)
        else:
            # ---------------- Backbone ----------------
            pyramid_feats = self.backbone(x)

            # ---------------- Neck: SPP ----------------
            pyramid_feats[-1] = self.neck(pyramid_feats[-1])

            # ---------------- Neck: PaFPN ----------------
            pyramid_feats = self.fpn(pyramid_feats)

            # ---------------- Heads ----------------
            cls_feats, reg_feats = self.det_heads(pyramid_feats)

            # ---------------- Preds ----------------
            outputs = self.pred_layers(cls_feats, reg_feats)

            return outputs
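

if __name__ == "__main__":
    # Minimal sketch (not part of the original model code): it reproduces, on dummy
    # tensors, the flattened-score top-k decoding used in post_process() above.
    # Shapes are assumptions chosen for illustration (one FPN level, an 8 x 8 grid,
    # 20 classes); only torch is required. Because of the relative imports at the
    # top of this file, this block is only reachable when the module is run as part
    # of its package (e.g. `python -m <package>.rtcdet_v2`, project layout assumed).
    torch.manual_seed(0)
    num_classes, topk, conf_thresh = 20, 10, 0.05
    cls_pred = torch.randn(1, 64, num_classes)   # [B, H x W, C]
    box_pred = torch.rand(1, 64, 4)              # [B, H x W, 4]

    scores = cls_pred[0].sigmoid().flatten()     # (H x W x C,)
    prob, idxs = scores.sort(descending=True)
    prob, idxs = prob[:topk], idxs[:topk]
    keep = prob > conf_thresh

    anchor_idxs = torch.div(idxs[keep], num_classes, rounding_mode='floor')
    labels = idxs[keep] % num_classes
    bboxes = box_pred[0][anchor_idxs]
    print(bboxes.shape, labels.shape, prob[keep].shape)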