# loss.py

import torch
import torch.nn.functional as F

from utils.box_ops import bbox_iou
from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized

from .matcher import TaskAlignedAssigner

class SetCriterion(object):
    def __init__(self, cfg):
        # --------------- Basic parameters ---------------
        self.cfg = cfg
        self.reg_max = cfg.reg_max
        self.num_classes = cfg.num_classes
        # --------------- Loss config ---------------
        self.loss_cls_weight = cfg.loss_cls
        self.loss_box_weight = cfg.loss_box
        # --------------- Matcher config ---------------
        self.matcher = TaskAlignedAssigner(num_classes     = cfg.num_classes,
                                           topk_candidates = cfg.tal_topk_candidates,
                                           alpha           = cfg.tal_alpha,
                                           beta            = cfg.tal_beta
                                           )
    def loss_classes(self, pred_cls, labels, scores):
        # Varifocal-style BCE with focal weighting: positives are weighted by
        # their assigned quality score, negatives by alpha * p^gamma.
        alpha = 0.75
        gamma = 2.0
        # pred and target should be of the same size
        bg_class_ind = pred_cls.shape[-1]
        pos_inds = ((labels >= 0) & (labels < bg_class_ind)).nonzero().squeeze(1)

        # scatter the assigned quality scores into the per-class target tensor
        new_scores = pred_cls.new_zeros(pred_cls.shape)
        pos_labels = labels[pos_inds]
        new_scores[pos_inds, pos_labels] = scores[pos_inds].clone().detach()

        pred_sigmoid = pred_cls.sigmoid()
        focal_weight = new_scores * (new_scores > 0.0).float() + \
                       alpha * (pred_sigmoid - new_scores).abs().pow(gamma) * \
                       (new_scores <= 0.0).float()
        loss_cls = F.binary_cross_entropy_with_logits(
            pred_cls, new_scores, reduction='none') * focal_weight

        return loss_cls
    def loss_bboxes(self, pred_box, gt_box, bbox_weight):
        # GIoU regression loss, weighted by the assigned quality score of each positive
        ious = bbox_iou(pred_box, gt_box, xywh=False, GIoU=True)
        loss_box = (1.0 - ious.squeeze(-1)) * bbox_weight

        return loss_box
    def __call__(self, outputs, targets):
        """
            outputs['pred_cls']: List(Tensor) [B, M, C]
            outputs['pred_reg']: List(Tensor) [B, M, 4*(reg_max+1)]
            outputs['pred_box']: List(Tensor) [B, M, 4]
            outputs['anchors']: List(Tensor) [M, 2]
            outputs['strides']: List(Int) [8, 16, 32] output stride
            outputs['stride_tensor']: List(Tensor) [M, 1]
            targets: (List) [dict{'boxes': [...],
                                  'labels': [...],
                                  'orig_size': ...}, ...]
        """
        # preds: [B, M, C]
        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
        box_preds = torch.cat(outputs['pred_box'], dim=1)
        bs, num_anchors = cls_preds.shape[:2]
        device = cls_preds.device
        anchors = torch.cat(outputs['anchors'], dim=0)

        # --------------- label assignment ---------------
        gt_label_targets = []
        gt_score_targets = []
        gt_bbox_targets = []
        fg_masks = []
        for batch_idx in range(bs):
            tgt_labels = targets[batch_idx]["labels"].to(device)  # [Mp,]
            tgt_boxs = targets[batch_idx]["boxes"].to(device)     # [Mp, 4]

            # check target
            if len(tgt_labels) == 0 or tgt_boxs.max().item() == 0.:
                # There is no valid gt
                fg_mask = cls_preds.new_zeros(1, num_anchors).bool()                        # [1, M,]
                gt_label = cls_preds.new_zeros((1, num_anchors)).long()                     # [1, M,]
                gt_score = cls_preds.new_zeros((1, num_anchors, self.num_classes)).float()  # [1, M, C]
                gt_box = cls_preds.new_zeros((1, num_anchors, 4)).float()                   # [1, M, 4]
            else:
                tgt_labels = tgt_labels[None, :, None]  # [1, Mp, 1]
                tgt_boxs = tgt_boxs[None]               # [1, Mp, 4]
                (
                    gt_label,   # [1, M,]
                    gt_box,     # [1, M, 4]
                    gt_score,   # [1, M, C]
                    fg_mask,    # [1, M,]
                    _
                ) = self.matcher(
                    pd_scores = cls_preds[batch_idx:batch_idx+1].detach().sigmoid(),
                    pd_bboxes = box_preds[batch_idx:batch_idx+1].detach(),
                    anc_points = anchors,
                    gt_labels = tgt_labels,
                    gt_bboxes = tgt_boxs
                )
            gt_label_targets.append(gt_label)
            gt_score_targets.append(gt_score)
            gt_bbox_targets.append(gt_box)
            fg_masks.append(fg_mask)

        # List[B, 1, M, C] -> Tensor[B, M, C] -> Tensor[BM, C]
        fg_masks = torch.cat(fg_masks, 0).view(-1)                                     # [BM,]
        gt_label_targets = torch.cat(gt_label_targets, 0).view(-1)                     # [BM,]
        gt_score_targets = torch.cat(gt_score_targets, 0).view(-1, self.num_classes)   # [BM, C]
        gt_bbox_targets = torch.cat(gt_bbox_targets, 0).view(-1, 4)                    # [BM, 4]

        num_fgs = gt_score_targets.sum()
        # Average loss normalizer across all the GPUs
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_fgs)
        num_fgs = (num_fgs / get_world_size()).clamp(1.0)

        # ------------------ Classification loss ------------------
        target_labels = torch.where(fg_masks > 0, gt_label_targets,
                                    torch.full_like(gt_label_targets, self.num_classes))
        target_scores = gt_score_targets.new_zeros(gt_score_targets.shape[0])
        target_scores[fg_masks] = gt_score_targets[fg_masks, target_labels[fg_masks]]
        cls_preds = cls_preds.view(-1, self.num_classes)
        loss_cls = self.loss_classes(cls_preds, target_labels, target_scores)
        loss_cls = loss_cls.sum() / num_fgs

        # ------------------ Regression loss ------------------
        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
        box_targets_pos = gt_bbox_targets.view(-1, 4)[fg_masks]
        bbox_weight = gt_score_targets[fg_masks].sum(-1)
        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos, bbox_weight)
        loss_box = loss_box.sum() / num_fgs

        # total loss
        losses = loss_cls * self.loss_cls_weight + loss_box * self.loss_box_weight

        loss_dict = dict(
            loss_cls = loss_cls,
            loss_box = loss_box,
            losses = losses
        )

        return loss_dict


if __name__ == "__main__":
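    # ----------------------------------------------------------------------
    # Minimal smoke test: a sketch only, not part of the training pipeline.
    # It assumes the relative imports above (TaskAlignedAssigner, bbox_iou,
    # distributed utils) resolve, and uses illustrative config values and
    # random data; shapes follow the docstring of SetCriterion.__call__.
    # ----------------------------------------------------------------------
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        reg_max=16, num_classes=80,
        loss_cls=0.5, loss_box=7.5,                             # loss weights (illustrative)
        tal_topk_candidates=10, tal_alpha=0.5, tal_beta=6.0,    # TAL hyper-params (illustrative)
    )
    criterion = SetCriterion(cfg)

    bs, img_size, strides = 2, 640, [8, 16, 32]
    outputs = {'pred_cls': [], 'pred_reg': [], 'pred_box': [], 'anchors': [],
               'strides': strides, 'stride_tensor': []}
    for s in strides:
        fs = img_size // s   # feature-map size at this stride
        m = fs * fs          # number of anchors at this level
        outputs['pred_cls'].append(torch.randn(bs, m, cfg.num_classes))
        outputs['pred_reg'].append(torch.randn(bs, m, 4 * (cfg.reg_max + 1)))
        # random but valid xyxy boxes in image coordinates
        xy = torch.rand(bs, m, 2) * img_size
        wh = torch.rand(bs, m, 2) * 64 + 1
        outputs['pred_box'].append(torch.cat([xy, (xy + wh).clamp(max=img_size)], dim=-1))
        # anchor center points of the level-s grid (assumed to be in image coordinates)
        grid = torch.arange(fs, dtype=torch.float32)
        gy, gx = torch.meshgrid(grid, grid, indexing='ij')
        outputs['anchors'].append((torch.stack([gx, gy], dim=-1).reshape(-1, 2) + 0.5) * s)
        outputs['stride_tensor'].append(torch.full((m, 1), float(s)))

    # two images, one ground-truth box each, in the documented target format
    # ('orig_size' is carried along but not read by the criterion)
    targets = [{'boxes': torch.tensor([[50., 60., 200., 220.]]),
                'labels': torch.tensor([3]),
                'orig_size': (img_size, img_size)}
               for _ in range(bs)]

    loss_dict = criterion(outputs, targets)
    print({k: v.item() for k, v in loss_dict.items()})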