# yolov10.py
  1. # --------------- Torch components ---------------
  2. import copy
  3. import torch
  4. import torch.nn as nn
  5. # --------------- Model components ---------------
  6. from .yolov10_backbone import Yolov10Backbone
  7. from .yolov10_pafpn import Yolov10PaFPN
  8. from .yolov10_head import Yolov10DetHead
  9. from .yolov10_pred import Yolov10DetPredLayer
  10. # YOLOv10
  11. class Yolov10(nn.Module):
  12. def __init__(self,
  13. cfg,
  14. is_val = False,
  15. ) -> None:
  16. super(Yolov10, self).__init__()
  17. # ---------------------- Basic setting ----------------------
  18. self.cfg = cfg
  19. self.num_classes = cfg.num_classes
  20. ## Post-process parameters
  21. self.topk_candidates = cfg.val_topk if is_val else cfg.test_topk
  22. self.conf_thresh = cfg.val_conf_thresh if is_val else cfg.test_conf_thresh
  23. self.nms_thresh = cfg.val_nms_thresh if is_val else cfg.test_nms_thresh
  24. self.no_multi_labels = False if is_val else True
  25. # ---------------------- Network Parameters ----------------------
  26. ## Backbone
  27. self.backbone = Yolov10Backbone(cfg)
  28. self.pyramid_feat_dims = self.backbone.feat_dims[-3:]
  29. ## PaFPN
  30. self.fpn = Yolov10PaFPN(cfg, self.backbone.feat_dims)
  31. ## Head
  32. self.head_o2m = Yolov10DetHead(cfg, self.fpn.out_dims)
  33. self.pred_o2m = Yolov10DetPredLayer(cfg, self.head_o2m.cls_head_dim, self.head_o2m.reg_head_dim)
  34. self.head_o2o = copy.deepcopy(self.head_o2m)
  35. self.pred_o2o = copy.deepcopy(self.pred_o2m)
  36. def post_process(self, cls_preds, box_preds):
  37. """
  38. We process predictions at each scale hierarchically
  39. Input:
  40. cls_preds: List[torch.Tensor] -> [[B, M, C], ...], B=1
  41. box_preds: List[torch.Tensor] -> [[B, M, 4], ...], B=1
  42. Output:
  43. bboxes: np.array -> [N, 4]
  44. scores: np.array -> [N,]
  45. labels: np.array -> [N,]
  46. """
  47. all_scores = []
  48. all_labels = []
  49. all_bboxes = []
  50. for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
  51. cls_pred_i = cls_pred_i[0]
  52. box_pred_i = box_pred_i[0]
  53. if self.no_multi_labels:
  54. # [M,]
  55. scores, labels = torch.max(cls_pred_i.sigmoid(), dim=1)
  56. # Keep top k top scoring indices only.
  57. num_topk = min(self.topk_candidates, box_pred_i.size(0))
  58. # topk candidates
  59. predicted_prob, topk_idxs = scores.sort(descending=True)
  60. topk_scores = predicted_prob[:num_topk]
  61. topk_idxs = topk_idxs[:num_topk]
  62. # filter out the proposals with low confidence score
  63. keep_idxs = topk_scores > self.conf_thresh
  64. scores = topk_scores[keep_idxs]
  65. topk_idxs = topk_idxs[keep_idxs]
  66. labels = labels[topk_idxs]
  67. bboxes = box_pred_i[topk_idxs]
  68. else:
  69. # [M, C] -> [MC,]
  70. scores_i = cls_pred_i.sigmoid().flatten()
  71. # Keep top k top scoring indices only.
  72. num_topk = min(self.topk_candidates, box_pred_i.size(0))
  73. # torch.sort is actually faster than .topk (at least on GPUs)
  74. predicted_prob, topk_idxs = scores_i.sort(descending=True)
  75. topk_scores = predicted_prob[:num_topk]
  76. topk_idxs = topk_idxs[:num_topk]
  77. # filter out the proposals with low confidence score
  78. keep_idxs = topk_scores > self.conf_thresh
  79. scores = topk_scores[keep_idxs]
  80. topk_idxs = topk_idxs[keep_idxs]
  81. anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
  82. labels = topk_idxs % self.num_classes
  83. bboxes = box_pred_i[anchor_idxs]
  84. all_scores.append(scores)
  85. all_labels.append(labels)
  86. all_bboxes.append(bboxes)
  87. scores = torch.cat(all_scores, dim=0)
  88. labels = torch.cat(all_labels, dim=0)
  89. bboxes = torch.cat(all_bboxes, dim=0)
  90. # to cpu & numpy
  91. scores = scores.cpu().numpy()
  92. labels = labels.cpu().numpy()
  93. bboxes = bboxes.cpu().numpy()
  94. # keep top-300 results
  95. scores = scores[:300]
  96. bboxes = bboxes[:300]
  97. labels = labels[:300]
  98. return bboxes, scores, labels
  99. def forward(self, x):
  100. # ---------------- Backbone ----------------
  101. pyramid_feats = self.backbone(x)
  102. # ---------------- PaFPN ----------------
  103. pyramid_feats = self.fpn(pyramid_feats)
  104. # ---------------- Heads (one-to-one) ----------------
  105. pyramid_feats_detach = [feat.detach() for feat in pyramid_feats]
  106. cls_feats, reg_feats = self.head_o2o(pyramid_feats_detach)
  107. outputs_o2o = self.pred_o2o(cls_feats, reg_feats)
  108. outputs_o2o['image_size'] = [x.shape[2], x.shape[3]]
  109. if not self.training:
  110. all_cls_preds = outputs_o2o['pred_cls']
  111. all_box_preds = outputs_o2o['pred_box']
  112. # post process
  113. bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
  114. outputs = {
  115. "scores": scores,
  116. "labels": labels,
  117. "bboxes": bboxes
  118. }
  119. else:
  120. # ---------------- Heads (one-to-many) ----------------
  121. cls_feats, reg_feats = self.head_o2m(pyramid_feats)
  122. outputs_o2m = self.pred_o2m(cls_feats, reg_feats)
  123. outputs_o2m['image_size'] = [x.shape[2], x.shape[3]]
  124. outputs = {
  125. "outputs_o2o": outputs_o2o,
  126. "outputs_o2m": outputs_o2m,
  127. }
  128. return outputs