# loss.py
  1. import torch
  2. import torch.nn.functional as F
  3. from utils.box_ops import bbox2dist, get_ious
  4. from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
  5. from .matcher import TaskAlignedAssigner, AlignedSimOTA
class Criterion(object):
    """Detection loss for a YOLO-style dense detector.

    Combines a classification loss, an IoU-based box regression loss, and a
    distribution-focal loss (DFL), with targets produced by one of two label
    assigners: SimOTA for the first half of training and TAL (task-aligned)
    for the second half (see ``__call__``).
    """

    def __init__(self, args, cfg, device, num_classes=80):
        # args: training arguments; only ``args.max_epoch`` is read here.
        # cfg:  config dict; requires keys 'ema_update', 'loss_cls_weight',
        #       'loss_box_weight', 'loss_dfl_weight', 'matcher', 'reg_max'.
        self.cfg = cfg
        self.args = args
        self.device = device
        self.num_classes = num_classes
        self.use_ema_update = cfg['ema_update']
        # ---------------- Loss weight ----------------
        # Each weight entry is indexed later by assigner name ('tal'/'ota'),
        # so these are expected to be dicts — TODO confirm against config.
        self.loss_cls_weight = cfg['loss_cls_weight']
        self.loss_box_weight = cfg['loss_box_weight']
        self.loss_dfl_weight = cfg['loss_dfl_weight']
        # ---------------- Matcher ----------------
        matcher_config = cfg['matcher']
        ## TAL assigner (used by tal_loss)
        self.tal_matcher = TaskAlignedAssigner(
            topk=matcher_config['tal']['topk'],
            alpha=matcher_config['tal']['alpha'],
            beta=matcher_config['tal']['beta'],
            num_classes=num_classes
        )
        ## SimOTA assigner (used by ota_loss)
        self.ota_matcher = AlignedSimOTA(
            center_sampling_radius=matcher_config['ota']['center_sampling_radius'],
            topk_candidate=matcher_config['ota']['topk_candidate'],
            num_classes=num_classes
        )

    def __call__(self, outputs, targets, epoch=0):
        """Dispatch to the epoch-appropriate loss.

        SimOTA assignment is used for the first half of training,
        TAL assignment for the second half.
        """
        if epoch < self.args.max_epoch // 2:
            return self.ota_loss(outputs, targets)
        else:
            return self.tal_loss(outputs, targets)

    def ema_update(self, name: str, value, initial_value, momentum=0.9):
        """Exponential-moving-average update of a scalar stored on ``self``.

        On first use the attribute ``name`` does not exist and the EMA is
        seeded from ``initial_value``. The updated value is stored back via
        setattr and returned.
        """
        if hasattr(self, name):
            old = getattr(self, name)
        else:
            old = initial_value
        new = old * momentum + value * (1 - momentum)
        setattr(self, name, new)
        return new

    # ----------------- Loss functions -----------------
    def loss_classes(self, pred_cls, gt_score, gt_label=None, vfl=False):
        """Per-element classification loss (no reduction).

        With ``vfl=True`` computes a varifocal-style loss: BCE-with-logits
        weighted by ``alpha * p^gamma`` on negatives and by the target score
        on positives (requires ``gt_label``, a one-hot mask of positives).
        With ``vfl=False`` it is plain BCE-with-logits against ``gt_score``
        and ``gt_label`` is ignored.
        """
        if vfl:
            assert gt_label is not None
            # compute varifocal loss
            alpha, gamma = 0.75, 2.0
            focal_weight = alpha * pred_cls.sigmoid().pow(gamma) * (1 - gt_label) + gt_score * gt_label
            bce_loss = F.binary_cross_entropy_with_logits(pred_cls, gt_score, reduction='none')
            loss_cls = bce_loss * focal_weight
        else:
            # compute bce loss
            loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_score, reduction='none')
        return loss_cls

    def loss_bboxes(self, pred_box, gt_box, bbox_weight=None):
        """GIoU regression loss per box (no reduction).

        Boxes are expected in xyxy format (see get_ious call). An optional
        per-box weight rescales each term in place.
        """
        # regression loss
        ious = get_ious(pred_box, gt_box, 'xyxy', 'giou')
        loss_box = 1.0 - ious
        if bbox_weight is not None:
            loss_box *= bbox_weight
        return loss_box

    def loss_dfl(self, pred_reg, gt_box, anchor, stride, bbox_weight=None):
        """Distribution focal loss over discretized l/t/r/b offsets.

        The GT box is converted to stride-normalized ltrb distances from the
        anchor point and the loss interpolates the cross-entropy between the
        two integer bins bracketing each continuous target distance.
        """
        # rescale coords by stride
        gt_box_s = gt_box / stride
        anchor_s = anchor / stride
        # compute deltas, clipped to the reg_max-1 bin range by bbox2dist
        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.cfg['reg_max'] - 1)
        gt_left = gt_ltrb_s.to(torch.long)
        gt_right = gt_left + 1
        # linear interpolation weights between the two neighboring bins
        weight_left = gt_right.to(torch.float) - gt_ltrb_s
        weight_right = 1 - weight_left
        # loss left
        loss_left = F.cross_entropy(
            pred_reg.view(-1, self.cfg['reg_max']),
            gt_left.view(-1),
            reduction='none').view(gt_left.shape) * weight_left
        # loss right
        loss_right = F.cross_entropy(
            pred_reg.view(-1, self.cfg['reg_max']),
            gt_right.view(-1),
            reduction='none').view(gt_left.shape) * weight_right
        # average across the 4 sides (ltrb)
        loss_dfl = (loss_left + loss_right).mean(-1)
        if bbox_weight is not None:
            loss_dfl *= bbox_weight
        return loss_dfl

    # ----------------- Loss with TAL assigner -----------------
    def tal_loss(self, outputs, targets):
        """ Compute loss with TAL assigner """
        bs = outputs['pred_cls'][0].shape[0]
        device = outputs['pred_cls'][0].device
        anchors = torch.cat(outputs['anchors'], dim=0)
        num_anchors = anchors.shape[0]
        # preds: [B, M, C]
        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
        box_preds = torch.cat(outputs['pred_box'], dim=1)
        # --------------- label assignment (per image) ---------------
        gt_label_targets = []
        gt_score_targets = []
        gt_bbox_targets = []
        fg_masks = []
        for batch_idx in range(bs):
            tgt_labels = targets[batch_idx]["labels"].to(device)
            tgt_bboxes = targets[batch_idx]["boxes"].to(device)
            # check target
            if len(tgt_labels) == 0 or tgt_bboxes.max().item() == 0.:
                # There is no valid gt: all anchors are background
                fg_mask = cls_preds.new_zeros(1, num_anchors).bool()  #[1, M,]
                gt_label = cls_preds.new_zeros((1, num_anchors,))  #[1, M,]
                gt_score = cls_preds.new_zeros((1, num_anchors, self.num_classes))  #[1, M, C]
                gt_box = cls_preds.new_zeros((1, num_anchors, 4))  #[1, M, 4]
            else:
                tgt_labels = tgt_labels[None, :, None]  # [1, Mp, 1]
                tgt_bboxes = tgt_bboxes[None]  # [1, Mp, 4]
                (
                    gt_label,  #[1, M]
                    gt_box,  #[1, M, 4]
                    gt_score,  #[1, M, C]
                    fg_mask,  #[1, M,]
                    _
                ) = self.tal_matcher(
                    # detach: assignment must not backprop into the head
                    pd_scores=cls_preds[batch_idx:batch_idx+1].detach().sigmoid(),
                    pd_bboxes=box_preds[batch_idx:batch_idx+1].detach(),
                    anc_points=anchors,
                    gt_labels=tgt_labels,
                    gt_bboxes=tgt_bboxes
                )
            gt_label_targets.append(gt_label)
            gt_score_targets.append(gt_score)
            gt_bbox_targets.append(gt_box)
            fg_masks.append(fg_mask)
        # List[B, 1, M, C] -> Tensor[B, M, C] -> Tensor[BM, C]
        fg_masks = torch.cat(fg_masks, 0).view(-1)  # [BM,]
        gt_score_targets = torch.cat(gt_score_targets, 0).view(-1, self.num_classes)  # [BM, C]
        gt_bbox_targets = torch.cat(gt_bbox_targets, 0).view(-1, 4)  # [BM, 4]
        gt_label_targets = torch.cat(gt_label_targets, 0).view(-1)  # [BM,]
        # background anchors get the sentinel class index num_classes ...
        gt_label_targets = torch.where(fg_masks > 0, gt_label_targets, torch.full_like(gt_label_targets, self.num_classes))
        # ... which the one-hot drops again via the [:-1] slice
        gt_labels_one_hot = F.one_hot(gt_label_targets.long(), self.num_classes + 1)[..., :-1]
        # per-positive weight = assigned score mass of that anchor
        bbox_weight = gt_score_targets[fg_masks].sum(-1)
        # normalizer is the total target score, floored at 1.
        # NOTE(review): max(tensor, 1) returns the python int 1 when the sum
        # is 0, which would make the all_reduce below fail — verify the
        # no-GT-in-whole-batch case is handled upstream.
        num_fgs = max(gt_score_targets.sum(), 1)
        # average loss normalizer across all the GPUs
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_fgs)
            num_fgs = max(num_fgs / get_world_size(), 1.0)
        # update loss normalizer with EMA (seeded at 100 on first call)
        if self.use_ema_update:
            normalizer = self.ema_update("loss_normalizer", max(num_fgs, 1), 100)
        else:
            normalizer = num_fgs
        # ------------------ Classification loss ------------------
        cls_preds = cls_preds.view(-1, self.num_classes)
        # NOTE(review): vfl=False means gt_labels_one_hot is ignored by
        # loss_classes; it is computed above presumably for a vfl=True
        # variant — confirm whether VFL was meant to be enabled here.
        loss_cls = self.loss_classes(cls_preds, gt_score_targets, gt_labels_one_hot, vfl=False)
        loss_cls = loss_cls.sum() / normalizer
        # ------------------ Regression loss ------------------
        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
        box_targets_pos = gt_bbox_targets[fg_masks]
        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos, bbox_weight)
        loss_box = loss_box.sum() / normalizer
        # ------------------ Distribution focal loss ------------------
        ## process anchors: tile per image to align with the [BM] flattening
        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
        ## process stride tensors
        strides = torch.cat(outputs['stride_tensor'], dim=0)
        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
        ## fg preds
        reg_preds_pos = reg_preds.view(-1, 4*self.cfg['reg_max'])[fg_masks]
        anchors_pos = anchors[fg_masks]
        strides_pos = strides[fg_masks]
        ## compute dfl
        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets_pos, anchors_pos, strides_pos, bbox_weight)
        loss_dfl = loss_dfl.sum() / normalizer
        # total loss (TAL-specific weights)
        losses = self.loss_cls_weight['tal'] * loss_cls + \
                 self.loss_box_weight['tal'] * loss_box + \
                 self.loss_dfl_weight['tal'] * loss_dfl
        loss_dict = dict(
            loss_cls=loss_cls,
            loss_box=loss_box,
            loss_dfl=loss_dfl,
            losses=losses
        )
        return loss_dict

    # ----------------- Loss with SimOTA assigner -----------------
    def ota_loss(self, outputs, targets):
        """ Compute loss with SimOTA assigner """
        bs = outputs['pred_cls'][0].shape[0]
        device = outputs['pred_cls'][0].device
        fpn_strides = outputs['strides']
        anchors = outputs['anchors']
        num_anchors = sum([ab.shape[0] for ab in anchors])
        # preds: [B, M, C]
        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
        box_preds = torch.cat(outputs['pred_box'], dim=1)
        # --------------- label assignment (per image) ---------------
        cls_targets = []
        box_targets = []
        fg_masks = []
        for batch_idx in range(bs):
            tgt_labels = targets[batch_idx]["labels"].to(device)
            tgt_bboxes = targets[batch_idx]["boxes"].to(device)
            # check target
            if len(tgt_labels) == 0 or tgt_bboxes.max().item() == 0.:
                # There is no valid gt: empty box targets, all-background mask
                cls_target = cls_preds.new_zeros((num_anchors, self.num_classes))
                box_target = cls_preds.new_zeros((0, 4))
                fg_mask = cls_preds.new_zeros(num_anchors).bool()
            else:
                (
                    fg_mask,
                    assigned_labels,
                    assigned_ious,
                    assigned_indexs
                ) = self.ota_matcher(
                    fpn_strides=fpn_strides,
                    anchors=anchors,
                    pred_cls=cls_preds[batch_idx],
                    pred_box=box_preds[batch_idx],
                    tgt_labels=tgt_labels,
                    tgt_bboxes=tgt_bboxes
                )
                # prepare cls targets: IoU-weighted one-hot on positives only
                assigned_labels = F.one_hot(assigned_labels.long(), self.num_classes)
                assigned_labels = assigned_labels * assigned_ious.unsqueeze(-1)
                cls_target = assigned_labels.new_zeros((num_anchors, self.num_classes))
                cls_target[fg_mask] = assigned_labels
                # prepare box targets: one GT box per positive anchor
                box_target = tgt_bboxes[assigned_indexs]
            cls_targets.append(cls_target)
            box_targets.append(box_target)
            fg_masks.append(fg_mask)
        # box_targets holds positives only, so it aligns with fg_masks-indexed preds
        cls_targets = torch.cat(cls_targets, 0)
        box_targets = torch.cat(box_targets, 0)
        fg_masks = torch.cat(fg_masks, 0)
        num_fgs = fg_masks.sum()
        # average loss normalizer across all the GPUs
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_fgs)
        # clamp(1.0) sets a min of 1 (same intent as tal_loss's max(..., 1.0))
        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
        # update loss normalizer with EMA (seeded at 100 on first call)
        if self.use_ema_update:
            normalizer = self.ema_update("loss_normalizer", max(num_fgs, 1), 100)
        else:
            normalizer = num_fgs
        # ------------------ Classification loss ------------------
        cls_preds = cls_preds.view(-1, self.num_classes)
        loss_cls = self.loss_classes(cls_preds, cls_targets)
        loss_cls = loss_cls.sum() / normalizer
        # ------------------ Regression loss ------------------
        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
        loss_box = self.loss_bboxes(box_preds_pos, box_targets)
        loss_box = loss_box.sum() / normalizer
        # ------------------ Distribution focal loss ------------------
        ## process anchors: tile per image to align with the [BM] flattening
        anchors = torch.cat(anchors, dim=0)
        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
        ## process stride tensors
        strides = torch.cat(outputs['stride_tensor'], dim=0)
        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
        ## fg preds
        reg_preds_pos = reg_preds.view(-1, 4*self.cfg['reg_max'])[fg_masks]
        anchors_pos = anchors[fg_masks]
        strides_pos = strides[fg_masks]
        ## compute dfl
        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets, anchors_pos, strides_pos)
        loss_dfl = loss_dfl.sum() / normalizer
        # total loss (OTA-specific weights)
        losses = self.loss_cls_weight['ota'] * loss_cls + \
                 self.loss_box_weight['ota'] * loss_box + \
                 self.loss_dfl_weight['ota'] * loss_dfl
        loss_dict = dict(
            loss_cls=loss_cls,
            loss_box=loss_box,
            loss_dfl=loss_dfl,
            losses=losses
        )
        return loss_dict
  281. def build_criterion(args, cfg, device, num_classes):
  282. criterion = Criterion(
  283. args=args,
  284. cfg=cfg,
  285. device=device,
  286. num_classes=num_classes
  287. )
  288. return criterion
if __name__ == "__main__":
    # This module has no standalone behavior; it is consumed via build_criterion().
    pass