# rtdetr.py — Real-time DETR (RT-DETR) model definition.
  1. import torch
  2. import torch.nn as nn
  3. try:
  4. from .basic_modules.basic import multiclass_nms
  5. from .rtdetr_encoder import build_image_encoder
  6. from .rtdetr_decoder import build_transformer
  7. except:
  8. from basic_modules.basic import multiclass_nms
  9. from rtdetr_encoder import build_image_encoder
  10. from rtdetr_decoder import build_transformer
  11. # Real-time DETR
  12. class RT_DETR(nn.Module):
  13. def __init__(self,
  14. cfg,
  15. num_classes = 80,
  16. conf_thresh = 0.1,
  17. nms_thresh = 0.5,
  18. topk = 300,
  19. onnx_deploy = False,
  20. no_multi_labels = False,
  21. use_nms = False,
  22. nms_class_agnostic = False,
  23. ):
  24. super().__init__()
  25. # ----------- Basic setting -----------
  26. self.num_classes = num_classes
  27. self.num_topk = topk
  28. self.onnx_deploy = onnx_deploy
  29. ## Post-process parameters
  30. self.use_nms = use_nms
  31. self.nms_thresh = nms_thresh
  32. self.conf_thresh = conf_thresh
  33. self.no_multi_labels = no_multi_labels
  34. self.nms_class_agnostic = nms_class_agnostic
  35. # ----------- Network setting -----------
  36. ## Image encoder
  37. self.image_encoder = build_image_encoder(cfg)
  38. self.fpn_dims = self.image_encoder.fpn_dims
  39. ## Detect decoder
  40. self.detect_decoder = build_transformer(cfg, self.fpn_dims, num_classes, return_intermediate=self.training)
  41. def deploy(self):
  42. assert not self.training
  43. for m in self.modules():
  44. if hasattr(m, 'convert_to_deploy'):
  45. m.convert_to_deploy()
  46. return self
  47. def post_process(self, box_pred, cls_pred):
  48. # xywh -> xyxy
  49. box_preds_x1y1 = box_pred[..., :2] - 0.5 * box_pred[..., 2:]
  50. box_preds_x2y2 = box_pred[..., :2] + 0.5 * box_pred[..., 2:]
  51. box_pred = torch.cat([box_preds_x1y1, box_preds_x2y2], dim=-1)
  52. cls_pred = cls_pred[0]
  53. box_pred = box_pred[0]
  54. if self.no_multi_labels:
  55. # [M,]
  56. scores, labels = torch.max(cls_pred.sigmoid(), dim=1)
  57. # Keep top k top scoring indices only.
  58. num_topk = min(self.num_topk, box_pred.size(0))
  59. # Topk candidates
  60. predicted_prob, topk_idxs = scores.sort(descending=True)
  61. topk_scores = predicted_prob[:num_topk]
  62. topk_idxs = topk_idxs[:num_topk]
  63. # Filter out the proposals with low confidence score
  64. keep_idxs = topk_scores > self.conf_thresh
  65. topk_idxs = topk_idxs[keep_idxs]
  66. # Top-k results
  67. topk_scores = topk_scores[keep_idxs]
  68. topk_labels = labels[topk_idxs]
  69. topk_bboxes = box_pred[topk_idxs]
  70. else:
  71. # Top-k select
  72. cls_pred = cls_pred.flatten().sigmoid_()
  73. box_pred = box_pred
  74. # Keep top k top scoring indices only.
  75. num_topk = min(self.num_topk, box_pred.size(0))
  76. # Topk candidates
  77. predicted_prob, topk_idxs = cls_pred.sort(descending=True)
  78. topk_scores = predicted_prob[:num_topk]
  79. topk_idxs = topk_idxs[:self.num_topk]
  80. # Filter out the proposals with low confidence score
  81. keep_idxs = topk_scores > self.conf_thresh
  82. topk_scores = topk_scores[keep_idxs]
  83. topk_idxs = topk_idxs[keep_idxs]
  84. topk_box_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
  85. ## Top-k results
  86. topk_labels = topk_idxs % self.num_classes
  87. topk_bboxes = box_pred[topk_box_idxs]
  88. if not self.onnx_deploy:
  89. topk_scores = topk_scores.cpu().numpy()
  90. topk_labels = topk_labels.cpu().numpy()
  91. topk_bboxes = topk_bboxes.cpu().numpy()
  92. # nms
  93. if self.use_nms:
  94. topk_scores, topk_labels, topk_bboxes = multiclass_nms(
  95. topk_scores, topk_labels, topk_bboxes, self.nms_thresh, self.num_classes, self.nms_class_agnostic)
  96. return topk_bboxes, topk_scores, topk_labels
  97. def forward(self, x, targets=None):
  98. # ----------- Image Encoder -----------
  99. pyramid_feats = self.image_encoder(x)
  100. # ----------- Transformer -----------
  101. outputs = self.detect_decoder(pyramid_feats, targets)
  102. if not self.training:
  103. img_h, img_w = x.shape[2:]
  104. box_pred = outputs["pred_boxes"]
  105. cls_pred = outputs["pred_logits"]
  106. # rescale bbox
  107. box_pred[..., [0, 2]] *= img_h
  108. box_pred[..., [1, 3]] *= img_w
  109. # post-process
  110. bboxes, scores, labels = self.post_process(box_pred, cls_pred)
  111. outputs = {
  112. "scores": scores,
  113. "labels": labels,
  114. "bboxes": bboxes,
  115. }
  116. return outputs
  117. if __name__ == '__main__':
  118. import time
  119. from thop import profile
  120. from loss import build_criterion
  121. # Model config
  122. cfg = {
  123. # Image Encoder - Backbone
  124. 'backbone': 'resnet101',
  125. 'backbone_norm': 'BN',
  126. 'res5_dilation': False,
  127. 'pretrained': False,
  128. 'pretrained_weight': 'imagenet1k_v1',
  129. 'freeze_at': 0,
  130. 'freeze_stem_only': False,
  131. 'out_stride': [8, 16, 32],
  132. 'max_stride': 32,
  133. # Image Encoder - FPN
  134. 'fpn': 'hybrid_encoder',
  135. 'fpn_num_blocks': 3,
  136. 'fpn_expansion': 0.5,
  137. 'fpn_act': 'silu',
  138. 'fpn_norm': 'BN',
  139. 'fpn_depthwise': False,
  140. 'hidden_dim': 384,
  141. 'en_num_heads': 8,
  142. 'en_num_layers': 1,
  143. 'en_ffn_dim': 2048,
  144. 'en_dropout': 0.0,
  145. 'pe_temperature': 10000.,
  146. 'en_act': 'gelu',
  147. # Transformer Decoder
  148. 'transformer': 'rtdetr_transformer',
  149. 'de_num_heads': 8,
  150. 'de_num_layers': 6,
  151. 'de_ffn_dim': 2048,
  152. 'de_dropout': 0.0,
  153. 'de_act': 'gelu',
  154. 'de_num_points': 4,
  155. 'num_queries': 300,
  156. 'learnt_init_query': False,
  157. 'pe_temperature': 10000.,
  158. 'dn_num_denoising': 100,
  159. 'dn_label_noise_ratio': 0.5,
  160. 'dn_box_noise_scale': 1,
  161. # Matcher
  162. 'matcher_hpy': {'cost_class': 2.0,
  163. 'cost_bbox': 5.0,
  164. 'cost_giou': 2.0,},
  165. # Loss
  166. 'use_vfl': True,
  167. 'loss_coeff': {'class': 1,
  168. 'bbox': 5,
  169. 'giou': 2,
  170. 'no_object': 0.1,},
  171. }
  172. bs = 1
  173. # Create a batch of images & targets
  174. image = torch.randn(bs, 3, 640, 640).cuda()
  175. targets = [{
  176. 'labels': torch.tensor([2, 4, 5, 8]).long().cuda(),
  177. 'boxes': torch.tensor([[0, 0, 10, 10], [12, 23, 56, 70], [0, 10, 20, 30], [50, 60, 55, 150]]).float().cuda() / 640.
  178. }] * bs
  179. # Create model
  180. model = RT_DETR(cfg, num_classes=20)
  181. model.train().cuda()
  182. # Create criterion
  183. criterion = build_criterion(cfg, num_classes=20)
  184. # Model inference
  185. outputs = model(image, targets)
  186. # Compute loss
  187. loss = criterion(outputs, targets)
  188. for k in loss.keys():
  189. print("{} : {}".format(k, loss[k].item()))
  190. # Inference
  191. with torch.no_grad():
  192. model.eval()
  193. model.deploy()
  194. t0 = time.time()
  195. outputs = model(image)
  196. t1 = time.time()
  197. print('Infer time: ', t1 - t0)
  198. print('==============================')
  199. model.eval()
  200. flops, params = profile(model, inputs=(image, ), verbose=False)
  201. print('==============================')
  202. print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
  203. print('Params : {:.2f} M'.format(params / 1e6))