# loss.py
  1. import torch
  2. import torch.nn.functional as F
  3. from utils.box_ops import bbox2dist, bbox_iou
  4. from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
  5. from .matcher import TaskAlignedAssigner
  6. class Criterion(object):
  7. def __init__(self, cfg, device, num_classes=80):
  8. # --------------- Basic parameters ---------------
  9. self.cfg = cfg
  10. self.device = device
  11. self.num_classes = num_classes
  12. self.reg_max = cfg['det_head']['reg_max']
  13. # --------------- Loss config ---------------
  14. self.loss_cls_weight = cfg['loss_cls_weight']
  15. self.loss_box_weight = cfg['loss_box_weight']
  16. self.loss_dfl_weight = cfg['loss_dfl_weight']
  17. # --------------- Matcher config ---------------
  18. self.matcher_hpy = cfg['matcher_hpy']
  19. self.matcher = TaskAlignedAssigner(num_classes = num_classes,
  20. topk_candidates = self.matcher_hpy['topk_candidates'],
  21. alpha = self.matcher_hpy['alpha'],
  22. beta = self.matcher_hpy['beta']
  23. )
  24. # -------------------- Basic loss functions --------------------
  25. def loss_classes(self, pred_cls, gt_score):
  26. # Compute BCE loss
  27. loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_score, reduction='none')
  28. return loss_cls
  29. def loss_bboxes(self, pred_box, gt_box, bbox_weight):
  30. # regression loss
  31. ious = bbox_iou(pred_box, gt_box, xywh=False, CIoU=True)
  32. loss_box = (1.0 - ious.squeeze(-1)) * bbox_weight
  33. return loss_box
  34. def loss_dfl(self, pred_reg, gt_box, anchor, stride, bbox_weight=None):
  35. # rescale coords by stride
  36. gt_box_s = gt_box / stride
  37. anchor_s = anchor / stride
  38. # compute deltas
  39. gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.reg_max - 1)
  40. gt_left = gt_ltrb_s.to(torch.long)
  41. gt_right = gt_left + 1
  42. weight_left = gt_right.to(torch.float) - gt_ltrb_s
  43. weight_right = 1 - weight_left
  44. # loss left
  45. loss_left = F.cross_entropy(
  46. pred_reg.view(-1, self.reg_max),
  47. gt_left.view(-1),
  48. reduction='none').view(gt_left.shape) * weight_left
  49. # loss right
  50. loss_right = F.cross_entropy(
  51. pred_reg.view(-1, self.reg_max),
  52. gt_right.view(-1),
  53. reduction='none').view(gt_left.shape) * weight_right
  54. loss_dfl = (loss_left + loss_right).mean(-1)
  55. if bbox_weight is not None:
  56. loss_dfl *= bbox_weight
  57. return loss_dfl
  58. def compute_det_loss(self, outputs, targets):
  59. """
  60. outputs['pred_cls']: List(Tensor) [B, M, C]
  61. outputs['pred_reg']: List(Tensor) [B, M, 4*(reg_max+1)]
  62. outputs['pred_box']: List(Tensor) [B, M, 4]
  63. outputs['anchors']: List(Tensor) [M, 2]
  64. outputs['strides']: List(Int) [8, 16, 32] output stride
  65. outputs['stride_tensor']: List(Tensor) [M, 1]
  66. targets: (List) [dict{'boxes': [...],
  67. 'labels': [...],
  68. 'orig_size': ...}, ...]
  69. """
  70. bs = outputs['pred_cls'][0].shape[0]
  71. device = outputs['pred_cls'][0].device
  72. strides = outputs['stride_tensor']
  73. anchors = outputs['anchors']
  74. anchors = torch.cat(anchors, dim=0)
  75. num_anchors = anchors.shape[0]
  76. # preds: [B, M, C]
  77. cls_preds = torch.cat(outputs['pred_cls'], dim=1)
  78. reg_preds = torch.cat(outputs['pred_reg'], dim=1)
  79. box_preds = torch.cat(outputs['pred_box'], dim=1)
  80. # --------------- label assignment ---------------
  81. gt_score_targets = []
  82. gt_bbox_targets = []
  83. fg_masks = []
  84. for batch_idx in range(bs):
  85. tgt_labels = targets[batch_idx]["labels"].to(device) # [Mp,]
  86. tgt_boxs = targets[batch_idx]["boxes"].to(device) # [Mp, 4]
  87. # check target
  88. if len(tgt_labels) == 0 or tgt_boxs.max().item() == 0.:
  89. # There is no valid gt
  90. fg_mask = cls_preds.new_zeros(1, num_anchors).bool() #[1, M,]
  91. gt_score = cls_preds.new_zeros((1, num_anchors, self.num_classes)) #[1, M, C]
  92. gt_box = cls_preds.new_zeros((1, num_anchors, 4)) #[1, M, 4]
  93. else:
  94. tgt_labels = tgt_labels[None, :, None] # [1, Mp, 1]
  95. tgt_boxs = tgt_boxs[None] # [1, Mp, 4]
  96. (
  97. _,
  98. gt_box, # [1, M, 4]
  99. gt_score, # [1, M, C]
  100. fg_mask, # [1, M,]
  101. _
  102. ) = self.matcher(
  103. pd_scores = cls_preds[batch_idx:batch_idx+1].detach().sigmoid(),
  104. pd_bboxes = box_preds[batch_idx:batch_idx+1].detach(),
  105. anc_points = anchors,
  106. gt_labels = tgt_labels,
  107. gt_bboxes = tgt_boxs
  108. )
  109. gt_score_targets.append(gt_score)
  110. gt_bbox_targets.append(gt_box)
  111. fg_masks.append(fg_mask)
  112. # List[B, 1, M, C] -> Tensor[B, M, C] -> Tensor[BM, C]
  113. fg_masks = torch.cat(fg_masks, 0).view(-1) # [BM,]
  114. gt_score_targets = torch.cat(gt_score_targets, 0).view(-1, self.num_classes) # [BM, C]
  115. gt_bbox_targets = torch.cat(gt_bbox_targets, 0).view(-1, 4) # [BM, 4]
  116. num_fgs = gt_score_targets.sum()
  117. # Average loss normalizer across all the GPUs
  118. if is_dist_avail_and_initialized():
  119. torch.distributed.all_reduce(num_fgs)
  120. num_fgs = (num_fgs / get_world_size()).clamp(1.0)
  121. # ------------------ Classification loss ------------------
  122. cls_preds = cls_preds.view(-1, self.num_classes)
  123. loss_cls = self.loss_classes(cls_preds, gt_score_targets)
  124. loss_cls = loss_cls.sum() / num_fgs
  125. # ------------------ Regression loss ------------------
  126. box_preds_pos = box_preds.view(-1, 4)[fg_masks]
  127. box_targets_pos = gt_bbox_targets.view(-1, 4)[fg_masks]
  128. bbox_weight = gt_score_targets[fg_masks].sum(-1)
  129. loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos, bbox_weight)
  130. loss_box = loss_box.sum() / num_fgs
  131. # ------------------ Distribution focal loss ------------------
  132. ## process anchors
  133. anchors = torch.cat(outputs['anchors'], dim=0)
  134. anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
  135. ## process stride tensors
  136. strides = torch.cat(outputs['stride_tensor'], dim=0)
  137. strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
  138. ## fg preds
  139. reg_preds_pos = reg_preds.view(-1, 4*self.reg_max)[fg_masks]
  140. anchors_pos = anchors[fg_masks]
  141. strides_pos = strides[fg_masks]
  142. ## compute dfl
  143. loss_dfl = self.loss_dfl(reg_preds_pos, box_targets_pos, anchors_pos, strides_pos, bbox_weight)
  144. loss_dfl = loss_dfl.sum() / num_fgs
  145. # total loss
  146. losses = loss_cls * self.loss_cls_weight + loss_box * self.loss_box_weight + loss_dfl * self.loss_dfl_weight
  147. loss_dict = dict(
  148. loss_cls = loss_cls,
  149. loss_box = loss_box,
  150. loss_dfl = loss_dfl,
  151. losses = losses
  152. )
  153. return loss_dict
  154. def compute_seg_loss(self, outputs, targets):
  155. """
  156. Input:
  157. outputs: (Dict) -> {
  158. 'pred_cls': (List[torch.Tensor] -> [B, M, Nc]),
  159. 'pred_reg': (List[torch.Tensor] -> [B, M, 4]),
  160. 'pred_box': (List[torch.Tensor] -> [B, M, 4]),
  161. 'strides': (List[Int])
  162. }
  163. target: (List[Dict]) [
  164. {'boxes': (torch.Tensor) -> [N, 4],
  165. 'labels': (torch.Tensor) -> [N,],
  166. ...}, ...
  167. ]
  168. Output:
  169. loss_dict: (Dict) -> {
  170. 'loss_cls': (torch.Tensor) It is a scalar.),
  171. 'loss_box': (torch.Tensor) It is a scalar.),
  172. 'loss_box_aux': (torch.Tensor) It is a scalar.),
  173. 'losses': (torch.Tensor) It is a scalar.),
  174. }
  175. """
  176. def compute_pos_loss(self, outputs, targets):
  177. """
  178. Input:
  179. outputs: (Dict) -> {
  180. 'pred_cls': (List[torch.Tensor] -> [B, M, Nc]),
  181. 'pred_reg': (List[torch.Tensor] -> [B, M, 4]),
  182. 'pred_box': (List[torch.Tensor] -> [B, M, 4]),
  183. 'strides': (List[Int])
  184. }
  185. target: (List[Dict]) [
  186. {'boxes': (torch.Tensor) -> [N, 4],
  187. 'labels': (torch.Tensor) -> [N,],
  188. ...}, ...
  189. ]
  190. Output:
  191. loss_dict: (Dict) -> {
  192. 'loss_cls': (torch.Tensor) It is a scalar.),
  193. 'loss_box': (torch.Tensor) It is a scalar.),
  194. 'loss_box_aux': (torch.Tensor) It is a scalar.),
  195. 'losses': (torch.Tensor) It is a scalar.),
  196. }
  197. """
  198. def __call__(self, outputs, targets, epoch=0, task='det'):
  199. # -------------- Detection loss --------------
  200. det_loss_dict = None
  201. if outputs['det_outputs'] is not None:
  202. det_loss_dict = self.compute_det_loss(outputs['det_outputs'], targets)
  203. # -------------- Segmentation loss --------------
  204. seg_loss_dict = None
  205. if outputs['seg_outputs'] is not None:
  206. seg_loss_dict = self.compute_seg_loss(outputs['seg_outputs'], targets)
  207. # -------------- Human pose loss --------------
  208. pos_loss_dict = None
  209. if outputs['pos_outputs'] is not None:
  210. pos_loss_dict = self.compute_seg_loss(outputs['pos_outputs'], targets)
  211. # Loss dict
  212. if task == 'det':
  213. return det_loss_dict
  214. if task == 'det_seg':
  215. return {'det_loss_dict': det_loss_dict,
  216. 'seg_loss_dict': seg_loss_dict}
  217. if task == 'det_pos':
  218. return {'det_loss_dict': det_loss_dict,
  219. 'pos_loss_dict': pos_loss_dict}
  220. if task == 'det_seg_pos':
  221. return {'det_loss_dict': det_loss_dict,
  222. 'seg_loss_dict': seg_loss_dict,
  223. 'pos_loss_dict': pos_loss_dict}
  224. def build_criterion(cfg, device, num_classes):
  225. criterion = Criterion(cfg=cfg, device=device, num_classes=num_classes)
  226. return criterion
  227. if __name__ == "__main__":
  228. pass