# rtdetr.py

import torch
import torch.nn as nn

# Support running both as a package module and as a standalone script.
try:
    from .rtdetr_encoder import build_image_encoder
    from .rtdetr_decoder import build_transformer
except ImportError:
    from rtdetr_encoder import build_image_encoder
    from rtdetr_decoder import build_transformer

# Real-time Transformer-based Object Detector
class RT_DETR(nn.Module):
    def __init__(self,
                 cfg,
                 num_classes=80,
                 conf_thresh=0.1,
                 topk=100,
                 deploy=False,
                 no_multi_labels=False,
                 ):
        super().__init__()
        # ----------- Basic setting -----------
        self.num_classes = num_classes
        self.num_topk = topk
        self.conf_thresh = conf_thresh
        self.no_multi_labels = no_multi_labels
        self.deploy = deploy

        # ----------- Network setting -----------
        ## Image encoder
        self.image_encoder = build_image_encoder(cfg)
        self.fpn_dims = self.image_encoder.fpn_dims

        ## Detect decoder
        self.detect_decoder = build_transformer(cfg, self.fpn_dims, num_classes, return_intermediate=self.training)

    def post_process(self, box_pred, cls_pred):
        if self.no_multi_labels:
            # Single-label mode: keep only the best class per query.
            # Drop the batch dim (batch size 1 at inference), for
            # consistency with the multi-label branch below.
            cls_pred = cls_pred[0]
            box_pred = box_pred[0]
            # [M,] best score and label for each query
            scores, labels = torch.max(cls_pred.sigmoid(), dim=1)

            # Keep top-k scoring indices only.
            num_topk = min(self.num_topk, box_pred.size(0))

            # Top-k candidates
            predicted_prob, topk_idxs = scores.sort(descending=True)
            topk_scores = predicted_prob[:num_topk]
            topk_idxs = topk_idxs[:num_topk]

            # Filter out proposals with low confidence scores
            keep_idxs = topk_scores > self.conf_thresh
            topk_idxs = topk_idxs[keep_idxs]

            # Top-k results
            topk_scores = topk_scores[keep_idxs]
            topk_labels = labels[topk_idxs]
            topk_bboxes = box_pred[topk_idxs]

            return topk_bboxes, topk_scores, topk_labels
        else:
            # Multi-label mode: treat every (query, class) pair as a candidate.
            # Flatten the [M, C] class predictions into [M * C] scores.
            cls_pred = cls_pred[0].flatten().sigmoid_()
            box_pred = box_pred[0]

            # Keep top-k scoring indices only.
            num_topk = min(self.num_topk, box_pred.size(0))

            # Top-k candidates
            predicted_prob, topk_idxs = cls_pred.sort(descending=True)
            topk_scores = predicted_prob[:num_topk]
            topk_idxs = topk_idxs[:num_topk]

            # Filter out proposals with low confidence scores
            keep_idxs = topk_scores > self.conf_thresh
            topk_scores = topk_scores[keep_idxs]
            topk_idxs = topk_idxs[keep_idxs]

            # Recover the box index and class label from each flattened index
            topk_box_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
            topk_labels = topk_idxs % self.num_classes
            topk_bboxes = box_pred[topk_box_idxs]

            return topk_bboxes, topk_scores, topk_labels

    def forward(self, x, targets=None):
        # ----------- Image Encoder -----------
        pyramid_feats = self.image_encoder(x)

        # ----------- Transformer -----------
        transformer_outputs = self.detect_decoder(pyramid_feats, targets)
        pred_boxes, pred_logits, enc_topk_bboxes, enc_topk_logits, dn_meta = transformer_outputs

        if self.training:
            return transformer_outputs
        else:
            # Take the predictions of the last decoder layer
            box_preds = pred_boxes[-1]
            cls_preds = pred_logits[-1]
            # Post-process
            bboxes, scores, labels = self.post_process(box_preds, cls_preds)

            return bboxes, scores, labels
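
# A minimal, self-contained sketch (illustrative, not part of the model) of
# the flattened top-k trick used in post_process above: sort the [M * C]
# score vector once, then recover the (box index, class label) pair from each
# flat index via integer division and modulo. Shapes and values are made up.
def _demo_flat_topk(num_queries=5, num_classes=3, k=4):
    scores = torch.rand(num_queries * num_classes)          # [M * C] flat scores
    prob, idxs = scores.sort(descending=True)               # sort all candidates
    box_idxs = torch.div(idxs[:k], num_classes, rounding_mode='floor')  # which query
    labels = idxs[:k] % num_classes                         # which class
    return prob[:k], box_idxs, labels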

if __name__ == '__main__':
    import time
    from thop import profile
    from loss import build_criterion

    # Model config
    cfg = {
        'width': 1.0,
        'depth': 1.0,
        'out_stride': [8, 16, 32],
        # Image Encoder - Backbone
        'backbone': 'resnet18',
        'backbone_norm': 'BN',
        'res5_dilation': False,
        'pretrained': True,
        'pretrained_weight': 'imagenet1k_v1',
        # Image Encoder - FPN
        'fpn': 'hybrid_encoder',
        'fpn_act': 'silu',
        'fpn_norm': 'BN',
        'fpn_depthwise': False,
        'hidden_dim': 256,
        'en_num_heads': 8,
        'en_num_layers': 1,
        'en_mlp_ratio': 4.0,
        'en_dropout': 0.1,
        'pe_temperature': 10000.,
        'en_act': 'gelu',
        # Transformer Decoder
        # ('hidden_dim' and 'pe_temperature' above are shared with the decoder)
        'transformer': 'rtdetr_transformer',
        'de_num_heads': 8,
        'de_num_layers': 6,
        'de_mlp_ratio': 4.0,
        'de_dropout': 0.0,
        'de_act': 'gelu',
        'de_num_points': 4,
        'num_queries': 300,
        'learnt_init_query': False,
        'dn_num_denoising': 100,
        'dn_label_noise_ratio': 0.5,
        'dn_box_noise_scale': 1,
        # Head
        'det_head': 'dino_head',
        # Matcher
        'matcher_hpy': {'cost_class': 2.0,
                        'cost_bbox': 5.0,
                        'cost_giou': 2.0,},
        # Loss
        'use_vfl': True,
        'loss_coeff': {'class': 1,
                       'bbox': 5,
                       'giou': 2,
                       'no_object': 0.1,},
    }
    bs = 1

    # Create a batch of images & targets
    image = torch.randn(bs, 3, 640, 640)
    targets = [{
        'labels': torch.tensor([2, 4, 5, 8]).long(),
        'boxes': torch.tensor([[0, 0, 10, 10], [12, 23, 56, 70], [0, 10, 20, 30], [50, 60, 55, 150]]).float() / 640.
    }] * bs

    # Create model
    model = RT_DETR(cfg, num_classes=80)
    model.train()

    # Create criterion
    criterion = build_criterion(cfg, num_classes=80)

    # Model inference
    t0 = time.time()
    outputs = model(image, targets)
    t1 = time.time()
    print('Infer time: ', t1 - t0)

    # Compute loss
    loss = criterion(*outputs, targets)
    for k in loss.keys():
        print("{} : {}".format(k, loss[k].item()))
    print('==============================')

    model.eval()
    flops, params = profile(model, inputs=(image, ), verbose=False)
    print('==============================')
    # thop reports MACs; multiply by 2 to report FLOPs
    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
    print('Params : {:.2f} M'.format(params / 1e6))
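
    # A minimal eval-mode usage sketch (illustrative): with the model in
    # eval() mode, forward() runs post_process and returns the per-image
    # (bboxes, scores, labels) tensors directly.
    with torch.no_grad():
        bboxes, scores, labels = model(image)
    print('Detections kept after post-process: {}'.format(bboxes.shape[0]))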