# vitdet.py
  1. # --------------- Torch components ---------------
  2. import torch
  3. import torch.nn as nn
  4. # --------------- Model components ---------------
  5. try:
  6. from .vitdet_encoder import build_image_encoder
  7. from .vitdet_decoder import build_decoder
  8. from .vitdet_head import build_predictor
  9. from .basic_modules.basic import multiclass_nms
  10. except:
  11. from vitdet_encoder import build_image_encoder
  12. from vitdet_decoder import build_decoder
  13. from vitdet_head import build_predictor
  14. from basic_modules.basic import multiclass_nms
  15. # Real-time ViT-based Object Detector
  16. class ViTDet(nn.Module):
  17. def __init__(self,
  18. cfg,
  19. device,
  20. num_classes = 20,
  21. conf_thresh = 0.01,
  22. nms_thresh = 0.5,
  23. topk = 1000,
  24. trainable = False,
  25. deploy = False,
  26. no_multi_labels = False,
  27. nms_class_agnostic = False,
  28. ):
  29. super(ViTDet, self).__init__()
  30. # ---------------------- Basic Parameters ----------------------
  31. self.cfg = cfg
  32. self.device = device
  33. self.strides = cfg['stride']
  34. self.num_classes = num_classes
  35. ## Scale hidden channels by width_factor
  36. cfg['hidden_dim'] = round(cfg['hidden_dim'] * cfg['width'])
  37. cfg['pretrained'] = cfg['pretrained'] & trainable
  38. ## Post-process parameters
  39. self.conf_thresh = conf_thresh
  40. self.nms_thresh = nms_thresh
  41. self.topk = topk
  42. self.deploy = deploy
  43. self.no_multi_labels = no_multi_labels
  44. self.nms_class_agnostic = nms_class_agnostic
  45. # ---------------------- Network Parameters ----------------------
  46. ## ----------- Encoder -----------
  47. self.encoder = build_image_encoder(cfg)
  48. ## ----------- Decoder -----------
  49. self.decoder = build_decoder(cfg, self.encoder.fpn_dims, num_levels=3)
  50. ## ----------- Preds -----------
  51. self.predictor = build_predictor(cfg, self.strides, num_classes, 4, 3)
  52. def post_process(self, cls_preds, box_preds):
  53. """
  54. Input:
  55. cls_preds: List[np.array] -> [[M, C], ...]
  56. box_preds: List[np.array] -> [[M, 4], ...]
  57. Output:
  58. bboxes: np.array -> [N, 4]
  59. scores: np.array -> [N,]
  60. labels: np.array -> [N,]
  61. """
  62. all_scores = []
  63. all_labels = []
  64. all_bboxes = []
  65. for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
  66. cls_pred_i = cls_pred_i[0]
  67. box_pred_i = box_pred_i[0]
  68. if self.no_multi_labels:
  69. # [M,]
  70. scores, labels = torch.max(cls_pred_i.sigmoid(), dim=1)
  71. # Keep top k top scoring indices only.
  72. num_topk = min(self.topk_candidates, box_pred_i.size(0))
  73. # topk candidates
  74. predicted_prob, topk_idxs = scores.sort(descending=True)
  75. topk_scores = predicted_prob[:num_topk]
  76. topk_idxs = topk_idxs[:num_topk]
  77. # filter out the proposals with low confidence score
  78. keep_idxs = topk_scores > self.conf_thresh
  79. scores = topk_scores[keep_idxs]
  80. topk_idxs = topk_idxs[keep_idxs]
  81. labels = labels[topk_idxs]
  82. bboxes = box_pred_i[topk_idxs]
  83. else:
  84. # [M, C] -> [MC,]
  85. scores_i = cls_pred_i.sigmoid().flatten()
  86. # Keep top k top scoring indices only.
  87. num_topk = min(self.topk_candidates, box_pred_i.size(0))
  88. # torch.sort is actually faster than .topk (at least on GPUs)
  89. predicted_prob, topk_idxs = scores_i.sort(descending=True)
  90. topk_scores = predicted_prob[:num_topk]
  91. topk_idxs = topk_idxs[:num_topk]
  92. # filter out the proposals with low confidence score
  93. keep_idxs = topk_scores > self.conf_thresh
  94. scores = topk_scores[keep_idxs]
  95. topk_idxs = topk_idxs[keep_idxs]
  96. anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
  97. labels = topk_idxs % self.num_classes
  98. bboxes = box_pred_i[anchor_idxs]
  99. all_scores.append(scores)
  100. all_labels.append(labels)
  101. all_bboxes.append(bboxes)
  102. scores = torch.cat(all_scores, dim=0)
  103. labels = torch.cat(all_labels, dim=0)
  104. bboxes = torch.cat(all_bboxes, dim=0)
  105. if not self.deploy:
  106. # to cpu & numpy
  107. scores = scores.cpu().numpy()
  108. labels = labels.cpu().numpy()
  109. bboxes = bboxes.cpu().numpy()
  110. # nms
  111. scores, labels, bboxes = multiclass_nms(
  112. scores, labels, bboxes, self.nms_thresh, self.num_classes, self.nms_class_agnostic)
  113. return bboxes, scores, labels
  114. def forward(self, x):
  115. # ---------------- Backbone ----------------
  116. pyramid_feats = self.encoder(x)
  117. # ---------------- Heads ----------------
  118. outputs = self.decoder(pyramid_feats)
  119. # ---------------- Preds ----------------
  120. outputs = self.predictor(outputs['cls_feats'], outputs['reg_feats'])
  121. if not self.training:
  122. cls_pred = outputs["pred_cls"]
  123. box_pred = outputs["pred_box"]
  124. # post process
  125. bboxes, scores, labels = self.post_process(cls_pred, box_pred)
  126. outputs = {
  127. "scores": scores,
  128. "labels": labels,
  129. "bboxes": bboxes
  130. }
  131. return outputs
  132. if __name__ == '__main__':
  133. import time
  134. from thop import profile
  135. from loss import build_criterion
  136. # Model config
  137. cfg = {
  138. 'width': 1.0,
  139. 'depth': 1.0,
  140. 'out_stride': [8, 16, 32],
  141. # Image Encoder - Backbone
  142. 'backbone': 'resnet18',
  143. 'backbone_norm': 'BN',
  144. 'res5_dilation': False,
  145. 'pretrained': True,
  146. 'pretrained_weight': 'imagenet1k_v1',
  147. 'freeze_at': 0,
  148. 'freeze_stem_only': False,
  149. 'out_stride': [8, 16, 32],
  150. 'max_stride': 32,
  151. # Convolutional Decoder
  152. 'hidden_dim': 256,
  153. 'decoder': 'det_decoder',
  154. 'de_num_cls_layers': 2,
  155. 'de_num_reg_layers': 2,
  156. 'de_act': 'silu',
  157. 'de_norm': 'BN',
  158. # Matcher
  159. 'matcher_hpy': {'soft_center_radius': 2.5,
  160. 'topk_candidates': 13,},
  161. # Loss
  162. 'use_vfl': True,
  163. 'loss_coeff': {'class': 1,
  164. 'bbox': 1,
  165. 'giou': 2,},
  166. }
  167. bs = 1
  168. # Create a batch of images & targets
  169. image = torch.randn(bs, 3, 640, 640).cuda()
  170. targets = [{
  171. 'labels': torch.tensor([2, 4, 5, 8]).long().cuda(),
  172. 'boxes': torch.tensor([[0, 0, 10, 10], [12, 23, 56, 70], [0, 10, 20, 30], [50, 60, 55, 150]]).float().cuda() / 640.
  173. }] * bs
  174. # Create model
  175. model = ViTDet(cfg, num_classes=20)
  176. model.train().cuda()
  177. # Create criterion
  178. criterion = build_criterion(cfg, num_classes=20)
  179. # Model inference
  180. t0 = time.time()
  181. outputs = model(image, targets)
  182. t1 = time.time()
  183. print('Infer time: ', t1 - t0)
  184. # Compute loss
  185. loss = criterion(outputs, targets)
  186. for k in loss.keys():
  187. print("{} : {}".format(k, loss[k].item()))
  188. print('==============================')
  189. model.eval()
  190. flops, params = profile(model, inputs=(image, ), verbose=False)
  191. print('==============================')
  192. print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
  193. print('Params : {:.2f} M'.format(params / 1e6))