detr.py

import torch
import torch.nn as nn
import torch.nn.functional as F

# --------------- Model components ---------------
from ...backbone import build_backbone
from ...transformer import build_transformer
from ...basic.mlp import MLP


# Detection with Transformer
class DETR(nn.Module):
    def __init__(self,
                 cfg,
                 num_classes: int = 80,
                 conf_thresh: float = 0.05,
                 topk: int = 1000,
                 ):
        super().__init__()
        # ---------------------- Basic Parameters ----------------------
        self.cfg = cfg
        self.topk = topk
        self.num_classes = num_classes
        self.conf_thresh = conf_thresh

        # ---------------------- Network Parameters ----------------------
        ## Backbone
        self.backbone, feat_dims = build_backbone(cfg)

        ## Input proj: map the last backbone feature map to the transformer width
        self.input_proj = nn.Conv2d(feat_dims[-1], cfg.hidden_dim, kernel_size=1)

        ## Object Queries
        self.query_embed = nn.Embedding(cfg.num_queries, cfg.hidden_dim)

        ## Transformer
        self.transformer = build_transformer(cfg, return_intermediate_dec=True)

        ## Output heads: classification (+1 for the "no object" class) and
        ## a 3-layer MLP that regresses normalized (cx, cy, w, h) boxes
        self.class_embed = nn.Linear(cfg.hidden_dim, num_classes + 1)
        self.bbox_embed = MLP(cfg.hidden_dim, cfg.feedward_dim, 4, 3)

    @torch.jit.unused
    def set_aux_loss(self, outputs_class, outputs_coord):
        # Auxiliary outputs from the intermediate decoder layers (all but the last)
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
    def post_process(self, cls_pred, box_pred):
        """
        Input:
            cls_pred: (Tensor) [Nq, C]
            box_pred: (Tensor) [Nq, 4]
        """
        # Flatten the per-query class scores: [Nq x C,]
        scores_i = cls_pred.flatten()

        # Keep top-k scoring indices only.
        num_topk = min(self.topk, box_pred.size(0))

        # torch.sort is actually faster than .topk (at least on GPUs)
        predicted_prob, topk_idxs = scores_i.sort(descending=True)
        topk_scores = predicted_prob[:num_topk]
        topk_idxs = topk_idxs[:num_topk]

        # filter out the proposals with low confidence scores
        keep_idxs = topk_scores > self.conf_thresh
        topk_idxs = topk_idxs[keep_idxs]

        # final scores
        scores = topk_scores[keep_idxs]
        # final labels: each flattened index decodes to (query, class)
        labels = topk_idxs % self.num_classes
        # final bboxes: the query index selects the matching box prediction
        anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
        bboxes = box_pred[anchor_idxs]

        # to cpu & numpy
        scores = scores.cpu().numpy()
        labels = labels.cpu().numpy()
        bboxes = bboxes.cpu().numpy()

        return bboxes, scores, labels
    def forward(self, src, src_mask=None):
        # ---------------- Backbone ----------------
        pyramid_feats = self.backbone(src)
        feat = self.input_proj(pyramid_feats[-1])

        # Downsample the padding mask to the feature resolution,
        # or assume no padding when no mask is given.
        if src_mask is not None:
            src_mask = F.interpolate(src_mask[None].float(), size=feat.shape[-2:]).bool()[0]
        else:
            src_mask = torch.zeros([feat.shape[0], *feat.shape[-2:]], device=feat.device, dtype=torch.bool)

        # ---------------- Transformer ----------------
        hs = self.transformer(feat, src_mask, self.query_embed.weight)[0]

        # ---------------- Head ----------------
        outputs_class = self.class_embed(hs)
        outputs_coord = self.bbox_embed(hs).sigmoid()

        if self.training:
            outputs = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
            outputs['aux_outputs'] = self.set_aux_loss(outputs_class, outputs_coord)
        else:
            # [B, N, C] -> [N, C]: take the single image in the batch and
            # drop the trailing "no object" column after the softmax
            cls_pred = outputs_class[-1].softmax(-1)[0, :, :-1]
            box_pred = outputs_coord[-1][0]

            # (cx, cy, w, h) -> (x1, y1, x2, y2), still normalized to [0, 1]
            cxcy_pred = box_pred[..., :2]
            bwbh_pred = box_pred[..., 2:]
            x1y1_pred = cxcy_pred - 0.5 * bwbh_pred
            x2y2_pred = cxcy_pred + 0.5 * bwbh_pred
            box_pred = torch.cat([x1y1_pred, x2y2_pred], dim=-1)

            # Post-process (no NMS)
            bboxes, scores, labels = self.post_process(cls_pred, box_pred)

            outputs = {
                'scores': scores,
                'labels': labels,
                'bboxes': bboxes
            }

        return outputs
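

# ---------------------------------------------------------------------------
# Standalone sketch, not part of the original file: it mirrors the flattened
# top-k decoding from DETR.post_process above with made-up shapes and
# thresholds, so the index arithmetic (query = idx // C, label = idx % C) can
# be sanity-checked in isolation. It only needs torch; since this module uses
# package-relative imports, lift the snippet into its own script to run it.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    num_queries, num_classes = 100, 80        # assumed sizes, for illustration only
    conf_thresh, topk = 0.5, 10               # assumed thresholds, for illustration only

    cls_pred = torch.rand(num_queries, num_classes)   # fake per-query class scores [Nq, C]
    box_pred = torch.rand(num_queries, 4)             # fake per-query boxes [Nq, 4]

    # Flatten scores and keep the top-k, as in post_process
    scores_i = cls_pred.flatten()                     # [Nq * C]
    predicted_prob, topk_idxs = scores_i.sort(descending=True)
    topk_scores, topk_idxs = predicted_prob[:topk], topk_idxs[:topk]

    # Confidence filtering
    keep = topk_scores > conf_thresh
    topk_idxs = topk_idxs[keep]

    # Decode flattened indices back to (query, class) pairs
    labels = topk_idxs % num_classes
    query_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')
    bboxes = box_pred[query_idxs]

    print(bboxes.shape, topk_scores[keep].shape, labels.shape)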