import torch
import torch.nn.functional as F

from utils.box_ops import bbox2dist, get_ious
from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized

from .matcher import build_matcher


# ----------------------- Criterion for training -----------------------
class Criterion(object):
    def __init__(self, args, cfg, device, num_classes=80):
        self.cfg = cfg
        self.args = args
        self.device = device
        self.num_classes = num_classes
        self.max_epoch = args.max_epoch
        self.no_aug_epoch = args.no_aug_epoch
        self.use_ema_update = cfg['ema_update']
        self.loss_box_aux = cfg['loss_box_aux']
        # ---------------- Loss weight ----------------
        loss_weights = cfg['loss_weights'][cfg['matcher']]
        self.loss_cls_weight = loss_weights['loss_cls_weight']
        self.loss_box_weight = loss_weights['loss_box_weight']
        self.loss_dfl_weight = loss_weights['loss_dfl_weight']
        # ---------------- Matcher ----------------
        ## SimOTA / Aligned SimOTA assigner, selected by cfg['matcher']
        self.matcher = build_matcher(cfg, num_classes)
    def ema_update(self, name: str, value, initial_value, momentum=0.9):
        # keep an exponential moving average of `value` in the attribute `name`
        if hasattr(self, name):
            old = getattr(self, name)
        else:
            old = initial_value
        new = old * momentum + value * (1 - momentum)
        setattr(self, name, new)

        return new
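
    # Illustrative arithmetic (hypothetical numbers, not part of the training loop):
    # with the default momentum of 0.9 and initial_value=100, a first call
    # ema_update("loss_normalizer", 64, 100) stores and returns
    # 100 * 0.9 + 64 * 0.1 = 96.4; later calls keep blending the stored value
    # toward the per-iteration foreground count.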

    # ----------------- Loss functions -----------------
    def loss_classes(self, pred_cls, gt_score):
        # compute bce loss
        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_score, reduction='none')

        return loss_cls

    def loss_classes_qfl(self, pred_cls, target, beta=2.0):
        """
        Quality Focal Loss (QFL).
        pred_cls: (torch.Tensor) [N, C]
        target: (tuple([torch.Tensor], [torch.Tensor])): label -> (N,), score -> (N,)
        """
        label, score = target

        # treat every entry as a negative first: BCE toward 0, modulated by sigmoid(pred) ** beta
        pred_sigmoid = pred_cls.sigmoid()
        scale_factor = pred_sigmoid
        zerolabel = scale_factor.new_zeros(pred_cls.shape)
        ce_loss = F.binary_cross_entropy_with_logits(
            pred_cls, zerolabel, reduction='none') * scale_factor.pow(beta)

        # overwrite the assigned-class entries of positive anchors:
        # BCE toward the quality score, modulated by |score - sigmoid(pred)| ** beta
        bg_class_ind = pred_cls.shape[-1]
        pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
        pos_label = label[pos].long()
        scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
        ce_loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
            pred_cls[pos, pos_label], score[pos],
            reduction='none') * scale_factor.abs().pow(beta)

        return ce_loss
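
    # Worked example (hypothetical numbers): for a positive anchor whose assigned
    # quality score is 0.8 and whose predicted probability for that class is 0.3,
    # the modulating factor is |0.8 - 0.3| ** 2 = 0.25, so the BCE toward the soft
    # target 0.8 is scaled by 0.25; every other (negative) entry keeps its BCE toward
    # 0 scaled by sigmoid(logit) ** 2, which suppresses easy negatives.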

    def loss_bboxes(self, pred_box, gt_box):
        # regression loss: 1 - GIoU between predicted and target boxes (xyxy format)
        ious = get_ious(pred_box, gt_box, 'xyxy', 'giou')
        loss_box = 1.0 - ious

        return loss_box

    def loss_dfl(self, pred_reg, gt_box, anchor, stride, bbox_weight=None):
        # rescale coords by stride
        gt_box_s = gt_box / stride
        anchor_s = anchor / stride

        # compute deltas
        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.cfg['reg_max'] - 1)
        gt_left = gt_ltrb_s.to(torch.long)
        gt_right = gt_left + 1
        weight_left = gt_right.to(torch.float) - gt_ltrb_s
        weight_right = 1 - weight_left

        # loss left
        loss_left = F.cross_entropy(
            pred_reg.view(-1, self.cfg['reg_max']),
            gt_left.view(-1),
            reduction='none').view(gt_left.shape) * weight_left
        # loss right
        loss_right = F.cross_entropy(
            pred_reg.view(-1, self.cfg['reg_max']),
            gt_right.view(-1),
            reduction='none').view(gt_left.shape) * weight_right
        loss_dfl = (loss_left + loss_right).mean(-1)

        if bbox_weight is not None:
            loss_dfl *= bbox_weight

        return loss_dfl
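
    # Worked example (hypothetical numbers): a continuous target distance of 2.3
    # stride units falls between the integer bins 2 and 3, so the cross-entropy
    # toward bin 2 is weighted by 0.7 and toward bin 3 by 0.3; the two weighted
    # terms are then averaged over the four ltrb sides to give the per-anchor DFL.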

    def loss_bboxes_aux(self, pred_delta, gt_box, anchors, stride_tensors):
        # L1 loss on the raw ltrb offsets of the GT box w.r.t. each anchor point,
        # expressed in units of the feature-map stride
        gt_delta_tl = (anchors - gt_box[..., :2]) / stride_tensors
        gt_delta_rb = (gt_box[..., 2:] - anchors) / stride_tensors
        gt_delta = torch.cat([gt_delta_tl, gt_delta_rb], dim=1)
        loss_box_aux = F.l1_loss(pred_delta, gt_delta, reduction='none')

        return loss_box_aux

    # ----------------- Main process -----------------
    def loss_simota(self, outputs, targets, epoch=0):
        bs = outputs['pred_cls'][0].shape[0]
        device = outputs['pred_cls'][0].device
        fpn_strides = outputs['strides']
        anchors = outputs['anchors']
        num_anchors = sum([ab.shape[0] for ab in anchors])
        # preds: [B, M, C]
        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
        box_preds = torch.cat(outputs['pred_box'], dim=1)

        # --------------- label assignment ---------------
        cls_targets = []
        box_targets = []
        fg_masks = []
        for batch_idx in range(bs):
            tgt_labels = targets[batch_idx]["labels"].to(device)
            tgt_bboxes = targets[batch_idx]["boxes"].to(device)

            # check target
            if len(tgt_labels) == 0 or tgt_bboxes.max().item() == 0.:
                # there is no valid gt
                cls_target = cls_preds.new_zeros((num_anchors, self.num_classes))
                box_target = cls_preds.new_zeros((0, 4))
                fg_mask = cls_preds.new_zeros(num_anchors).bool()
            else:
                (
                    fg_mask,
                    assigned_labels,
                    assigned_ious,
                    assigned_indexs
                ) = self.matcher(
                    fpn_strides = fpn_strides,
                    anchors = anchors,
                    pred_cls = cls_preds[batch_idx],
                    pred_box = box_preds[batch_idx],
                    tgt_labels = tgt_labels,
                    tgt_bboxes = tgt_bboxes
                )
                # prepare cls targets: IoU-weighted one-hot labels on foreground anchors
                assigned_labels = F.one_hot(assigned_labels.long(), self.num_classes)
                assigned_labels = assigned_labels * assigned_ious.unsqueeze(-1)
                cls_target = assigned_labels.new_zeros((num_anchors, self.num_classes))
                cls_target[fg_mask] = assigned_labels
                # prepare box targets
                box_target = tgt_bboxes[assigned_indexs]

            cls_targets.append(cls_target)
            box_targets.append(box_target)
            fg_masks.append(fg_mask)

        cls_targets = torch.cat(cls_targets, 0)
        box_targets = torch.cat(box_targets, 0)
        fg_masks = torch.cat(fg_masks, 0)
        num_fgs = fg_masks.sum()

        # average loss normalizer across all the GPUs
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_fgs)
        num_fgs = (num_fgs / get_world_size()).clamp(1.0)

        # update loss normalizer with EMA
        if self.use_ema_update:
            normalizer = self.ema_update("loss_normalizer", max(num_fgs, 1), 100)
        else:
            normalizer = num_fgs

        # ------------------ Classification loss ------------------
        cls_preds = cls_preds.view(-1, self.num_classes)
        loss_cls = self.loss_classes(cls_preds, cls_targets)
        loss_cls = loss_cls.sum() / normalizer

        # ------------------ Regression loss ------------------
        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
        loss_box = self.loss_bboxes(box_preds_pos, box_targets)
        loss_box = loss_box.sum() / normalizer

        # ------------------ Distribution focal loss ------------------
        ## process anchors
        anchors = torch.cat(anchors, dim=0)
        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
        ## process stride tensors
        strides = torch.cat(outputs['stride_tensor'], dim=0)
        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
        ## fg preds
        reg_preds_pos = reg_preds.view(-1, 4*self.cfg['reg_max'])[fg_masks]
        anchors_pos = anchors[fg_masks]
        strides_pos = strides[fg_masks]
        ## compute dfl
        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets, anchors_pos, strides_pos)
        loss_dfl = loss_dfl.sum() / normalizer

        # total loss
        losses = self.loss_cls_weight * loss_cls + \
                 self.loss_box_weight * loss_box + \
                 self.loss_dfl_weight * loss_dfl

        loss_dict = dict(
            loss_cls = loss_cls,
            loss_box = loss_box,
            loss_dfl = loss_dfl,
            losses = losses
        )

        # ------------------ Aux regression loss ------------------
        if epoch >= (self.max_epoch - self.no_aug_epoch - 1) and self.loss_box_aux:
            ## delta_preds
            delta_preds = torch.cat(outputs['pred_delta'], dim=1)
            delta_preds_pos = delta_preds.view(-1, 4)[fg_masks]
            ## aux loss
            loss_box_aux = self.loss_bboxes_aux(delta_preds_pos, box_targets, anchors_pos, strides_pos)
            loss_box_aux = loss_box_aux.sum() / normalizer

            losses += loss_box_aux
            loss_dict['loss_box_aux'] = loss_box_aux

        return loss_dict

    def loss_aligned_simota(self, outputs, targets, epoch=0):
        """
        outputs['pred_cls']: List(Tensor) [B, M, C]
        outputs['pred_box']: List(Tensor) [B, M, 4]
        outputs['strides']: List(Int) [8, 16, 32] output stride
        targets: (List) [dict{'boxes': [...],
                              'labels': [...],
                              'orig_size': ...}, ...]
        """
        bs = outputs['pred_cls'][0].shape[0]
        device = outputs['pred_cls'][0].device
        fpn_strides = outputs['strides']
        anchors = outputs['anchors']
        # preds: [B, M, C]
        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
        box_preds = torch.cat(outputs['pred_box'], dim=1)

        # --------------- label assignment ---------------
        cls_targets = []
        box_targets = []
        assign_metrics = []
        for batch_idx in range(bs):
            tgt_labels = targets[batch_idx]["labels"].to(device)  # [N,]
            tgt_bboxes = targets[batch_idx]["boxes"].to(device)   # [N, 4]
            # label assignment
            assigned_result = self.matcher(fpn_strides=fpn_strides,
                                           anchors=anchors,
                                           pred_cls=cls_preds[batch_idx].detach(),
                                           pred_box=box_preds[batch_idx].detach(),
                                           gt_labels=tgt_labels,
                                           gt_bboxes=tgt_bboxes
                                           )
            cls_targets.append(assigned_result['assigned_labels'])
            box_targets.append(assigned_result['assigned_bboxes'])
            assign_metrics.append(assigned_result['assign_metrics'])

        cls_targets = torch.cat(cls_targets, dim=0)
        box_targets = torch.cat(box_targets, dim=0)
        assign_metrics = torch.cat(assign_metrics, dim=0)

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((cls_targets >= 0)
                    & (cls_targets < bg_class_ind)).nonzero().squeeze(1)
        num_fgs = assign_metrics.sum()

        # average loss normalizer across all the GPUs
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_fgs)
        num_fgs = (num_fgs / get_world_size()).clamp(1.0).item()

        # update loss normalizer with EMA
        if self.use_ema_update:
            normalizer = self.ema_update("loss_normalizer", max(num_fgs, 1), 100)
        else:
            normalizer = num_fgs

        # ---------------------------- Classification loss ----------------------------
        cls_preds = cls_preds.view(-1, self.num_classes)
        loss_cls = self.loss_classes_qfl(cls_preds, (cls_targets, assign_metrics))
        loss_cls = loss_cls.sum() / normalizer

        # ---------------------------- Regression loss ----------------------------
        box_preds_pos = box_preds.view(-1, 4)[pos_inds]
        box_targets_pos = box_targets[pos_inds]
        box_weight_pos = assign_metrics[pos_inds]
        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos)
        loss_box *= box_weight_pos
        loss_box = loss_box.sum() / normalizer

        # ------------------ Distribution focal loss ------------------
        ## process anchors
        anchors = torch.cat(anchors, dim=0)
        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
        ## process stride tensors
        strides = torch.cat(outputs['stride_tensor'], dim=0)
        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
        ## fg preds
        reg_preds_pos = reg_preds.view(-1, 4*self.cfg['reg_max'])[pos_inds]
        anchors_pos = anchors[pos_inds]
        strides_pos = strides[pos_inds]
        ## compute dfl
        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets_pos, anchors_pos, strides_pos)
        loss_dfl *= box_weight_pos
        loss_dfl = loss_dfl.sum() / normalizer

        # total loss
        losses = self.loss_cls_weight * loss_cls + \
                 self.loss_box_weight * loss_box + \
                 self.loss_dfl_weight * loss_dfl

        loss_dict = dict(
            loss_cls = loss_cls,
            loss_box = loss_box,
            loss_dfl = loss_dfl,
            losses = losses
        )

        # ------------------ Aux regression loss ------------------
        if epoch >= (self.max_epoch - self.no_aug_epoch - 1) and self.loss_box_aux:
            ## delta_preds
            delta_preds = torch.cat(outputs['pred_delta'], dim=1)
            delta_preds_pos = delta_preds.view(-1, 4)[pos_inds]
            ## aux loss
            loss_box_aux = self.loss_bboxes_aux(delta_preds_pos, box_targets_pos, anchors_pos, strides_pos)
            loss_box_aux = loss_box_aux.sum() / normalizer

            losses += loss_box_aux
            loss_dict['loss_box_aux'] = loss_box_aux

        return loss_dict
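
    # Note: loss_simota builds IoU-weighted one-hot targets and scores them with
    # plain BCE, while loss_aligned_simota keeps integer class targets plus
    # per-anchor alignment metrics and scores them with Quality Focal Loss; both
    # share the GIoU box loss, the DFL term, and the optional auxiliary L1 delta loss.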

    def __call__(self, outputs, targets, epoch=0):
        if self.cfg['matcher'] == "simota":
            return self.loss_simota(outputs, targets, epoch)
        elif self.cfg['matcher'] == "aligned_simota":
            return self.loss_aligned_simota(outputs, targets, epoch)
        else:
            raise NotImplementedError("Unknown matcher: {}".format(self.cfg['matcher']))


def build_criterion(args, cfg, device, num_classes):
    criterion = Criterion(
        args=args,
        cfg=cfg,
        device=device,
        num_classes=num_classes
    )

    return criterion


if __name__ == "__main__":
    pass
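    # A minimal usage sketch (illustrative only; the numeric weights below are
    # hypothetical and the matcher-specific keys expected by build_matcher are
    # defined elsewhere in the repo):
    #
    #   cfg = {
    #       'matcher': 'aligned_simota',
    #       'loss_weights': {'aligned_simota': {'loss_cls_weight': 1.0,
    #                                           'loss_box_weight': 2.0,
    #                                           'loss_dfl_weight': 0.5}},
    #       'ema_update': False,
    #       'loss_box_aux': False,
    #       'reg_max': 16,
    #       # ... matcher hyper-parameters ...
    #   }
    #   criterion = build_criterion(args, cfg, device, num_classes=80)
    #   loss_dict = criterion(outputs, targets, epoch)
    #   loss_dict['losses'].backward()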