# rtdetr.py

import math

import torch
import torch.nn as nn

from .rtdetr_encoder import build_encoder
from .rtdetr_decoder import build_decoder
from .rtdetr_dethead import build_dethead


# Real-time DETR
class RTDETR(nn.Module):
    def __init__(self,
                 cfg,
                 device,
                 num_classes=20,
                 trainable=False,
                 aux_loss=False,
                 with_box_refine=False,
                 deploy=False):
        super(RTDETR, self).__init__()
        # --------- Basic Parameters ----------
        self.cfg = cfg
        self.device = device
        self.num_classes = num_classes
        self.trainable = trainable
        self.max_stride = max(cfg['stride'])
        # scale the base channel dim by the model width, e.g. round(256 * 0.5) = 128
        self.d_model = round(cfg['d_model'] * cfg['width'])
        self.aux_loss = aux_loss
        self.with_box_refine = with_box_refine
        self.deploy = deploy

        # --------- Network Parameters ----------
        ## Encoder
        self.encoder = build_encoder(cfg, trainable, 'img_encoder')

        ## Decoder
        self.decoder = build_decoder(cfg, self.d_model, return_intermediate=aux_loss)

        ## DetHead
        self.dethead = build_dethead(cfg, self.d_model, num_classes, with_box_refine)

        # share the detection head's class/box embeddings with the transformer
        # decoder, so it can refine boxes layer by layer
        self.decoder.class_embed = self.dethead.class_embed
        self.decoder.bbox_embed = self.dethead.bbox_embed

    # ---------------------- Basic Functions ----------------------
    def position_embedding(self, x, temperature=10000):
        hs, ws = x.shape[-2:]
        device = x.device
        num_pos_feats = x.shape[1] // 2
        scale = 2 * math.pi

        # generate the xy coordinate grid, normalized to (0, 2*pi]
        y_embed, x_embed = torch.meshgrid(
            [torch.arange(1, hs + 1, dtype=torch.float32),
             torch.arange(1, ws + 1, dtype=torch.float32)])
        y_embed = y_embed / (hs + 1e-6) * scale
        x_embed = x_embed / (ws + 1e-6) * scale

        # [H, W] -> [1, H, W]
        y_embed = y_embed[None, :, :].to(device)
        x_embed = x_embed[None, :, :].to(device)

        # geometrically spaced frequencies, as in "Attention Is All You Need"
        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=device)
        dim_t_ = torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats
        dim_t = temperature ** (2 * dim_t_)

        # interleave sin/cos pairs along the channel dimension
        pos_x = torch.div(x_embed[:, :, :, None], dim_t)
        pos_y = torch.div(y_embed[:, :, :, None], dim_t)
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)

        # [1, H, W, C] -> [1, C, H, W]
        pos_embed = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos_embed
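
    # Illustrative shape note (not in the original file): for an input feature
    # map of shape [B, C, H, W], position_embedding builds a [1, C, H, W] map
    # that is broadcast across the batch, with the first C//2 channels encoding
    # y and the last C//2 encoding x, e.g.
    #
    #   feat = torch.zeros(2, 256, 20, 20)
    #   pos = self.position_embedding(feat)   # -> [1, 256, 20, 20]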

    @torch.jit.unused
    def set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionaries with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
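
    # Illustrative note on the output layout (standard DETR convention, assumed
    # here): with a 6-layer decoder, outputs_class stacks per-layer logits, so
    # set_aux_loss returns 5 dicts for the intermediate layers, while the final
    # layer's predictions go into the top-level 'pred_logits' / 'pred_boxes'.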

    # ---------------------- Main Process for Inference ----------------------
    @torch.no_grad()
    def inference_single_image(self, x):
        # -------------------- Encoder --------------------
        pyramid_feats = self.encoder(x)

        # -------------------- Pos Embed --------------------
        # flatten the multi-level features and their positional embeddings:
        # each [B, C, H_i, W_i] -> [B, C, H_i*W_i], concatenated over levels,
        # then transposed to the [B, N, C] layout the decoder expects
        memory = torch.cat([feat.flatten(2) for feat in pyramid_feats], dim=-1)
        memory_pos = torch.cat([self.position_embedding(feat).flatten(2) for feat in pyramid_feats], dim=-1)
        memory = memory.permute(0, 2, 1).contiguous()
        memory_pos = memory_pos.permute(0, 2, 1).contiguous()

        # -------------------- Decoder --------------------
        hs, reference = self.decoder(memory, memory_pos)

        # -------------------- DetHead --------------------
        out_logits, out_bbox = self.dethead(hs, reference, False)
        cls_pred, box_pred = out_logits[0], out_bbox[0]

        # -------------------- Top-k --------------------
        # flatten the [Nq, num_classes] score map and keep the 100 highest
        # (query, class) pairs
        cls_pred = cls_pred.flatten().sigmoid_()
        num_topk = 100
        predicted_prob, topk_idxs = cls_pred.sort(descending=True)
        topk_idxs = topk_idxs[:num_topk]
        topk_box_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
        topk_scores = predicted_prob[:num_topk]
        topk_labels = topk_idxs % self.num_classes
        topk_bboxes = box_pred[topk_box_idxs]

        # denormalize bboxes from [0, 1] to the input image scale
        img_h, img_w = x.shape[-2:]
        topk_bboxes[..., 0::2] *= img_w
        topk_bboxes[..., 1::2] *= img_h

        if self.deploy:
            return topk_bboxes, topk_scores, topk_labels
        else:
            return topk_bboxes.cpu().numpy(), topk_scores.cpu().numpy(), topk_labels.cpu().numpy()
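
    # Worked example of the flat top-k decoding above (illustrative): with the
    # default num_classes=20, cls_pred is flattened from [Nq, 20] to [Nq*20],
    # so flat index 57 maps back to box/query 57 // 20 = 2 and label
    # 57 % 20 = 17.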

    # ---------------------- Main Process for Training ----------------------
    def forward(self, x):
        if not self.trainable:
            return self.inference_single_image(x)
        else:
            # -------------------- Encoder --------------------
            pyramid_feats = self.encoder(x)

            # -------------------- Pos Embed --------------------
            memory = torch.cat([feat.flatten(2) for feat in pyramid_feats], dim=-1)
            memory_pos = torch.cat([self.position_embedding(feat).flatten(2) for feat in pyramid_feats], dim=-1)
            memory = memory.permute(0, 2, 1).contiguous()
            memory_pos = memory_pos.permute(0, 2, 1).contiguous()

            # -------------------- Decoder --------------------
            hs, reference = self.decoder(memory, memory_pos)

            # -------------------- DetHead --------------------
            outputs_class, outputs_coords = self.dethead(hs, reference, True)
            outputs = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coords[-1]}
            if self.aux_loss:
                outputs['aux_outputs'] = self.set_aux_loss(outputs_class, outputs_coords)

            return outputs
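

# Minimal usage sketch (illustrative only; not part of the original file).
# The exact keys `cfg` must hold depend on what build_encoder / build_decoder /
# build_dethead expect; only the keys read directly in this file are shown.
#
#   cfg = {'stride': [8, 16, 32], 'd_model': 256, 'width': 1.0, ...}
#   device = torch.device('cuda')
#   model = RTDETR(cfg, device, num_classes=80, trainable=False).to(device)
#   x = torch.randn(1, 3, 640, 640, device=device)
#   bboxes, scores, labels = model(x)   # numpy arrays when deploy=False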