# yolov8_e2e.py
  1. # --------------- Torch components ---------------
  2. import copy
  3. import torch
  4. import torch.nn as nn
  5. # --------------- Model components ---------------
  6. from .yolov8_backbone import Yolov8Backbone
  7. from .yolov8_neck import SPPF
  8. from .yolov8_pafpn import Yolov8PaFPN
  9. from .yolov8_head import Yolov8DetHead
  10. from .yolov8_pred import Yolov8DetPredLayer
  11. # End-to-End YOLOv8
  12. class Yolov8E2E(nn.Module):
  13. def __init__(self, cfg, is_val = False):
  14. super(Yolov8E2E, self).__init__()
  15. # ---------------------- Basic setting ----------------------
  16. self.cfg = cfg
  17. self.num_classes = cfg.num_classes
  18. ## Post-process parameters
  19. self.topk_candidates = cfg.val_topk if is_val else cfg.test_topk
  20. self.conf_thresh = cfg.val_conf_thresh if is_val else cfg.test_conf_thresh
  21. self.no_multi_labels = False if is_val else True
  22. # ---------------------- Model Parameters ----------------------
  23. ## Backbone
  24. self.backbone = Yolov8Backbone(cfg)
  25. self.pyramid_feat_dims = self.backbone.feat_dims[-3:]
  26. ## Neck
  27. self.neck = SPPF(cfg, self.pyramid_feat_dims[-1], self.pyramid_feat_dims[-1])
  28. self.pyramid_feat_dims[-1] = self.neck.out_dim
  29. ## Neck: PaFPN
  30. self.fpn = Yolov8PaFPN(cfg, self.backbone.feat_dims)
  31. ## Head (one-to-one)
  32. self.head_o2o = Yolov8DetHead(cfg, self.fpn.out_dims)
  33. ## Pred (one-to-one)
  34. self.pred_o2o = Yolov8DetPredLayer(cfg, self.head_o2o.cls_head_dim, self.head_o2o.reg_head_dim)
  35. ## Aux head (one-to-many)
  36. self.head_o2m = copy.deepcopy(self.head_o2o)
  37. ## Aux Pred (one-to-many)
  38. self.pred_o2m = copy.deepcopy(self.pred_o2o)
  39. def post_process(self, cls_preds, box_preds):
  40. """
  41. We process predictions at each scale hierarchically
  42. Input:
  43. cls_preds: List[torch.Tensor] -> [[B, M, C], ...], B=1
  44. box_preds: List[torch.Tensor] -> [[B, M, 4], ...], B=1
  45. Output:
  46. bboxes: np.array -> [N, 4]
  47. scores: np.array -> [N,]
  48. labels: np.array -> [N,]
  49. """
  50. all_scores = []
  51. all_labels = []
  52. all_bboxes = []
  53. for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
  54. cls_pred_i = cls_pred_i[0]
  55. box_pred_i = box_pred_i[0]
  56. if self.no_multi_labels:
  57. # [M,]
  58. scores, labels = torch.max(cls_pred_i.sigmoid(), dim=1)
  59. # Keep top k top scoring indices only.
  60. num_topk = min(self.topk_candidates, box_pred_i.size(0))
  61. # topk candidates
  62. predicted_prob, topk_idxs = scores.sort(descending=True)
  63. topk_scores = predicted_prob[:num_topk]
  64. topk_idxs = topk_idxs[:num_topk]
  65. # filter out the proposals with low confidence score
  66. keep_idxs = topk_scores > self.conf_thresh
  67. scores = topk_scores[keep_idxs]
  68. topk_idxs = topk_idxs[keep_idxs]
  69. labels = labels[topk_idxs]
  70. bboxes = box_pred_i[topk_idxs]
  71. else:
  72. # [M, C] -> [MC,]
  73. scores_i = cls_pred_i.sigmoid().flatten()
  74. # Keep top k top scoring indices only.
  75. num_topk = min(self.topk_candidates, box_pred_i.size(0))
  76. # torch.sort is actually faster than .topk (at least on GPUs)
  77. predicted_prob, topk_idxs = scores_i.sort(descending=True)
  78. topk_scores = predicted_prob[:num_topk]
  79. topk_idxs = topk_idxs[:num_topk]
  80. # filter out the proposals with low confidence score
  81. keep_idxs = topk_scores > self.conf_thresh
  82. scores = topk_scores[keep_idxs]
  83. topk_idxs = topk_idxs[keep_idxs]
  84. anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
  85. labels = topk_idxs % self.num_classes
  86. bboxes = box_pred_i[anchor_idxs]
  87. all_scores.append(scores)
  88. all_labels.append(labels)
  89. all_bboxes.append(bboxes)
  90. scores = torch.cat(all_scores, dim=0)
  91. labels = torch.cat(all_labels, dim=0)
  92. bboxes = torch.cat(all_bboxes, dim=0)
  93. # to cpu & numpy
  94. scores = scores.cpu().numpy()
  95. labels = labels.cpu().numpy()
  96. bboxes = bboxes.cpu().numpy()
  97. return bboxes, scores, labels
  98. def inference_o2o(self, x):
  99. # ---------------- Backbone ----------------
  100. pyramid_feats = self.backbone(x)
  101. # ---------------- Neck: SPP ----------------
  102. pyramid_feats[-1] = self.neck(pyramid_feats[-1])
  103. # ---------------- Neck: PaFPN ----------------
  104. pyramid_feats = self.fpn(pyramid_feats)
  105. # ---------------- Heads ----------------
  106. cls_feats, reg_feats = self.head_o2o(pyramid_feats)
  107. # ---------------- Preds ----------------
  108. outputs = self.pred_o2o(cls_feats, reg_feats)
  109. outputs['image_size'] = [x.shape[2], x.shape[3]]
  110. all_cls_preds = outputs['pred_cls']
  111. all_box_preds = outputs['pred_box']
  112. # post process (no NMS)
  113. bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
  114. outputs = {
  115. "scores": scores,
  116. "labels": labels,
  117. "bboxes": bboxes
  118. }
  119. return outputs
  120. def forward(self, x):
  121. if not self.training:
  122. return self.inference_o2o(x)
  123. else:
  124. # ---------------- Backbone ----------------
  125. pyramid_feats = self.backbone(x)
  126. # ---------------- Neck: SPP ----------------
  127. pyramid_feats[-1] = self.neck(pyramid_feats[-1])
  128. # ---------------- Neck: PaFPN ----------------
  129. pyramid_feats = self.fpn(pyramid_feats)
  130. # ---------------- Heads ----------------
  131. o2m_cls_feats, o2m_reg_feats = self.head_o2m(pyramid_feats)
  132. # ---------------- Preds ----------------
  133. outputs_o2m = self.pred_o2m(o2m_cls_feats, o2m_reg_feats)
  134. outputs_o2m['image_size'] = [x.shape[2], x.shape[3]]
  135. # ---------------- Heads (one-to-one) ----------------
  136. o2o_cls_feats, o2o_reg_feats = self.head_o2o([feat.detach() for feat in pyramid_feats])
  137. # ---------------- Preds (one-to-one) ----------------
  138. outputs_o2o = self.pred_o2o(o2o_cls_feats, o2o_reg_feats)
  139. outputs_o2o['image_size'] = [x.shape[2], x.shape[3]]
  140. outputs = {
  141. "outputs_o2m": outputs_o2m,
  142. "outputs_o2o": outputs_o2o,
  143. }
  144. return outputs