loss.py

import torch
import torch.nn.functional as F

from utils.box_ops import bbox2dist, get_ious
from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized

from .matcher import SimOTA


# ----------------------- Criterion for training -----------------------
class Criterion(object):
    def __init__(self, args, cfg, device, num_classes=80):
        self.cfg = cfg
        self.args = args
        self.device = device
        self.num_classes = num_classes
        self.max_epoch = args.max_epoch
        self.no_aug_epoch = args.no_aug_epoch
        # ---------------- Loss weight ----------------
        self.loss_box_aux = cfg['loss_box_aux']
        self.loss_cls_weight = cfg['loss_cls_weight']
        self.loss_box_weight = cfg['loss_box_weight']
        self.loss_dfl_weight = cfg['loss_dfl_weight']
        # ---------------- Matcher ----------------
        ## Aligned SimOTA assigner
        self.matcher_hpy = cfg['matcher_hpy']
        self.matcher = SimOTA(num_classes = num_classes,
                              center_sampling_radius = self.matcher_hpy['center_sampling_radius'],
                              topk_candidate = self.matcher_hpy['topk_candidate'])
    # ----------------- Loss functions -----------------
    def loss_classes(self, pred_cls, gt_score):
        # compute bce loss
        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_score, reduction='none')

        return loss_cls

    def loss_bboxes(self, pred_box, gt_box):
        # regression loss
        ious = get_ious(pred_box, gt_box, 'xyxy', 'giou')
        loss_box = 1.0 - ious

        return loss_box

    def loss_dfl(self, pred_reg, gt_box, anchor, stride, bbox_weight=None):
        # rescale coords by stride
        gt_box_s = gt_box / stride
        anchor_s = anchor / stride

        # compute deltas
        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.cfg['reg_max'] - 1)
        gt_left = gt_ltrb_s.to(torch.long)
        gt_right = gt_left + 1
        weight_left = gt_right.to(torch.float) - gt_ltrb_s
        weight_right = 1 - weight_left

        # loss left
        loss_left = F.cross_entropy(
            pred_reg.view(-1, self.cfg['reg_max']),
            gt_left.view(-1),
            reduction='none').view(gt_left.shape) * weight_left

        # loss right
        loss_right = F.cross_entropy(
            pred_reg.view(-1, self.cfg['reg_max']),
            gt_right.view(-1),
            reduction='none').view(gt_left.shape) * weight_right

        loss_dfl = (loss_left + loss_right).mean(-1)

        if bbox_weight is not None:
            loss_dfl *= bbox_weight

        return loss_dfl
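    # Worked example for loss_dfl (illustrative numbers, not taken from the repo):
    # bbox2dist() yields continuous left/top/right/bottom distances in stride units,
    # e.g. gt_ltrb_s = 2.3 for one side. The continuous target is split across the
    # two neighbouring integer bins of the reg_max-way distribution:
    #     gt_left  = 2,  gt_right = 3
    #     weight_left  = 3 - 2.3 = 0.7
    #     weight_right = 1 - 0.7 = 0.3
    # so the per-side loss is 0.7 * CE(pred_reg, bin 2) + 0.3 * CE(pred_reg, bin 3),
    # and mean(-1) averages the four sides (l, t, r, b) of each positive anchor.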
    def loss_bboxes_aux(self, pred_delta, gt_box, anchors, stride_tensors):
        gt_delta_tl = (anchors - gt_box[..., :2]) / stride_tensors
        gt_delta_rb = (gt_box[..., 2:] - anchors) / stride_tensors
        gt_delta = torch.cat([gt_delta_tl, gt_delta_rb], dim=1)
        loss_box_aux = F.l1_loss(pred_delta, gt_delta, reduction='none')

        return loss_box_aux
    # ----------------- Main process -----------------
    def __call__(self, outputs, targets, epoch=0):
        bs = outputs['pred_cls'][0].shape[0]
        device = outputs['pred_cls'][0].device
        fpn_strides = outputs['strides']
        anchors = outputs['anchors']
        num_anchors = sum([ab.shape[0] for ab in anchors])
        # preds: [B, M, C]
        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
        box_preds = torch.cat(outputs['pred_box'], dim=1)

        # --------------- label assignment ---------------
        cls_targets = []
        box_targets = []
        fg_masks = []
        for batch_idx in range(bs):
            tgt_labels = targets[batch_idx]["labels"].to(device)
            tgt_bboxes = targets[batch_idx]["boxes"].to(device)

            # check target
            if len(tgt_labels) == 0 or tgt_bboxes.max().item() == 0.:
                # There is no valid gt
                cls_target = cls_preds.new_zeros((num_anchors, self.num_classes))
                box_target = cls_preds.new_zeros((0, 4))
                fg_mask = cls_preds.new_zeros(num_anchors).bool()
            else:
                (
                    fg_mask,
                    assigned_labels,
                    assigned_ious,
                    assigned_indexs
                ) = self.matcher(
                    fpn_strides = fpn_strides,
                    anchors = anchors,
                    pred_cls = cls_preds[batch_idx],
                    pred_box = box_preds[batch_idx],
                    tgt_labels = tgt_labels,
                    tgt_bboxes = tgt_bboxes
                )
                # prepare cls targets
                assigned_labels = F.one_hot(assigned_labels.long(), self.num_classes)
                assigned_labels = assigned_labels * assigned_ious.unsqueeze(-1)
                cls_target = assigned_labels.new_zeros((num_anchors, self.num_classes))
                cls_target[fg_mask] = assigned_labels
                # prepare box targets
                box_target = tgt_bboxes[assigned_indexs]

            cls_targets.append(cls_target)
            box_targets.append(box_target)
            fg_masks.append(fg_mask)

        cls_targets = torch.cat(cls_targets, 0)
        box_targets = torch.cat(box_targets, 0)
        fg_masks = torch.cat(fg_masks, 0)
        num_fgs = fg_masks.sum()

        # average loss normalizer across all the GPUs
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_fgs)
        num_fgs = (num_fgs / get_world_size()).clamp(1.0)

        # ------------------ Classification loss ------------------
        cls_preds = cls_preds.view(-1, self.num_classes)
        loss_cls = self.loss_classes(cls_preds, cls_targets)
        loss_cls = loss_cls.sum() / num_fgs

        # ------------------ Regression loss ------------------
        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
        loss_box = self.loss_bboxes(box_preds_pos, box_targets)
        loss_box = loss_box.sum() / num_fgs

        # ------------------ Distribution focal loss ------------------
        ## process anchors
        anchors = torch.cat(anchors, dim=0)
        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
        ## process stride tensors
        strides = torch.cat(outputs['stride_tensor'], dim=0)
        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
        ## fg preds
        reg_preds_pos = reg_preds.view(-1, 4*self.cfg['reg_max'])[fg_masks]
        anchors_pos = anchors[fg_masks]
        strides_pos = strides[fg_masks]
        ## compute dfl
        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets, anchors_pos, strides_pos)
        loss_dfl = loss_dfl.sum() / num_fgs

        # total loss
        losses = self.loss_cls_weight * loss_cls + \
                 self.loss_box_weight * loss_box + \
                 self.loss_dfl_weight * loss_dfl

        loss_dict = dict(
            loss_cls = loss_cls,
            loss_box = loss_box,
            loss_dfl = loss_dfl,
            losses = losses
        )

        # ------------------ Aux regression loss ------------------
        if epoch >= (self.max_epoch - self.no_aug_epoch - 1) and self.loss_box_aux:
            ## delta_preds
            delta_preds = torch.cat(outputs['pred_delta'], dim=1)
            delta_preds_pos = delta_preds.view(-1, 4)[fg_masks]
            ## aux loss
            loss_box_aux = self.loss_bboxes_aux(delta_preds_pos, box_targets, anchors_pos, strides_pos)
            loss_box_aux = loss_box_aux.sum() / num_fgs

            losses += loss_box_aux
            loss_dict['loss_box_aux'] = loss_box_aux

        return loss_dict
def build_criterion(args, cfg, device, num_classes):
    criterion = Criterion(
        args=args,
        cfg=cfg,
        device=device,
        num_classes=num_classes
    )

    return criterion
if __name__ == "__main__":
    pass
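    # ------------------------------------------------------------------
    # Minimal construction sketch (hedged): the cfg keys and args fields
    # below are only those referenced in this file; the values are dummy
    # placeholders, not the repo's real defaults. It only builds the
    # criterion and documents the expected inputs of __call__; a full
    # forward pass would additionally need real model outputs and the
    # SimOTA matcher shipped with this repo.
    # ------------------------------------------------------------------
    from types import SimpleNamespace

    dummy_args = SimpleNamespace(max_epoch=300, no_aug_epoch=20)
    dummy_cfg = {
        # loss weights (placeholder values)
        'loss_box_aux': True,
        'loss_cls_weight': 1.0,
        'loss_box_weight': 2.0,
        'loss_dfl_weight': 0.5,
        # number of DFL bins
        'reg_max': 16,
        # SimOTA hyper-parameters
        'matcher_hpy': {'center_sampling_radius': 2.5,
                        'topk_candidate': 10},
    }
    criterion = build_criterion(dummy_args, dummy_cfg, device='cpu', num_classes=80)

    # Expected interface of criterion(outputs, targets, epoch), per __call__ above:
    #   outputs['pred_cls']      : list of [B, M_i, num_classes] logits per FPN level
    #   outputs['pred_reg']      : list of [B, M_i, 4 * reg_max] DFL logits
    #   outputs['pred_box']      : list of [B, M_i, 4] decoded xyxy boxes
    #   outputs['pred_delta']    : list of [B, M_i, 4] ltrb deltas (aux loss only)
    #   outputs['anchors']       : list of [M_i, 2] anchor points
    #   outputs['stride_tensor'] : list of [M_i, 1] per-anchor strides
    #   outputs['strides']       : list of FPN strides
    #   targets                  : list (length B) of dicts with "labels" and "boxes"
    print(criterion)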