# criterion.py

import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.box_ops import get_ious
from utils.misc import sigmoid_focal_loss
from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized

from .matcher import FcosMatcher, SimOtaMatcher


class SetCriterion(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        # ------------- Basic parameters -------------
        self.cfg = cfg
        self.num_classes = cfg.num_classes
        # ------------- Focal loss -------------
        self.alpha = cfg.focal_loss_alpha
        self.gamma = cfg.focal_loss_gamma
        # ------------- Matcher & Loss weight -------------
        self.matcher_cfg = cfg.matcher_hpy
        if cfg.matcher == 'fcos_matcher':
            self.weight_dict = {'loss_cls': cfg.loss_cls_weight,
                                'loss_reg': cfg.loss_reg_weight,
                                'loss_ctn': cfg.loss_ctn_weight}
            self.matcher = FcosMatcher(cfg.num_classes,
                                       self.matcher_cfg['center_sampling_radius'],
                                       self.matcher_cfg['object_sizes_of_interest'],
                                       [1., 1., 1., 1.])
        elif cfg.matcher == 'simota':
            self.weight_dict = {'loss_cls': cfg.loss_cls_weight,
                                'loss_reg': cfg.loss_reg_weight}
            self.matcher = SimOtaMatcher(cfg.num_classes,
                                         self.matcher_cfg['soft_center_radius'],
                                         self.matcher_cfg['topk_candidates'])
        else:
            raise NotImplementedError("Unknown matcher: {}.".format(cfg.matcher))

    def loss_labels(self, pred_cls, tgt_cls, num_boxes=1.0):
        """
            pred_cls: (Tensor) [N, C]
            tgt_cls:  (Tensor) [N, C]
        """
        # cls loss: [V, C]
        loss_cls = sigmoid_focal_loss(pred_cls, tgt_cls, self.alpha, self.gamma)

        return loss_cls.sum() / num_boxes

    def loss_labels_qfl(self, pred_cls, target, beta=2.0, num_boxes=1.0):
        """
        Quality Focal Loss.
            pred_cls: (Tensor) [N, C]
            target: (tuple) (label: [N,], score: [N,])
        """
        label, score = target

        # All samples are first supervised with a zero quality target.
        pred_sigmoid = pred_cls.sigmoid()
        scale_factor = pred_sigmoid
        zerolabel = scale_factor.new_zeros(pred_cls.shape)
        ce_loss = F.binary_cross_entropy_with_logits(
            pred_cls, zerolabel, reduction='none') * scale_factor.pow(beta)

        # Positive samples are re-supervised with their quality (IoU) scores.
        bg_class_ind = pred_cls.shape[-1]
        pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
        if pos.shape[0] > 0:
            pos_label = label[pos].long()
            scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
            ce_loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
                pred_cls[pos, pos_label], score[pos],
                reduction='none') * scale_factor.abs().pow(beta)

        return ce_loss.sum() / num_boxes

    def loss_bboxes_ltrb(self, pred_delta, tgt_delta, bbox_quality=None, num_boxes=1.0):
        """
            pred_delta: (Tensor) [N, 4] predicted (l, t, r, b) offsets
            tgt_delta:  (Tensor) [N, 4] target (l, t, r, b) offsets
        """
        # Convert (l, t, r, b) offsets into pseudo xyxy boxes around the anchor point.
        pred_delta = torch.cat((-pred_delta[..., :2], pred_delta[..., 2:]), dim=-1)
        tgt_delta = torch.cat((-tgt_delta[..., :2], tgt_delta[..., 2:]), dim=-1)

        eps = torch.finfo(torch.float32).eps

        pred_area = (pred_delta[..., 2] - pred_delta[..., 0]).clamp_(min=0) \
            * (pred_delta[..., 3] - pred_delta[..., 1]).clamp_(min=0)
        tgt_area = (tgt_delta[..., 2] - tgt_delta[..., 0]).clamp_(min=0) \
            * (tgt_delta[..., 3] - tgt_delta[..., 1]).clamp_(min=0)

        w_intersect = (torch.min(pred_delta[..., 2], tgt_delta[..., 2])
                       - torch.max(pred_delta[..., 0], tgt_delta[..., 0])).clamp_(min=0)
        h_intersect = (torch.min(pred_delta[..., 3], tgt_delta[..., 3])
                       - torch.max(pred_delta[..., 1], tgt_delta[..., 1])).clamp_(min=0)
        area_intersect = w_intersect * h_intersect
        area_union = tgt_area + pred_area - area_intersect
        ious = area_intersect / area_union.clamp(min=eps)

        # giou
        g_w_intersect = torch.max(pred_delta[..., 2], tgt_delta[..., 2]) \
            - torch.min(pred_delta[..., 0], tgt_delta[..., 0])
        g_h_intersect = torch.max(pred_delta[..., 3], tgt_delta[..., 3]) \
            - torch.min(pred_delta[..., 1], tgt_delta[..., 1])
        ac_union = g_w_intersect * g_h_intersect
        gious = ious - (ac_union - area_union) / ac_union.clamp(min=eps)
        loss_box = 1 - gious

        if bbox_quality is not None:
            # weight each box loss term by its centerness quality
            loss_box = loss_box * bbox_quality.view(loss_box.size())

        return loss_box.sum() / num_boxes

    def loss_bboxes_xyxy(self, pred_box, gt_box, num_boxes=1.0):
        """
            pred_box: (Tensor) [N, 4]
            gt_box:   (Tensor) [N, 4]
        """
        ious = get_ious(pred_box, gt_box, box_mode="xyxy", iou_type='giou')
        loss_box = 1.0 - ious

        return loss_box.sum() / num_boxes

    def fcos_loss(self, outputs, targets):
        """
            outputs['pred_cls']: (Tensor) [B, M, C]
            outputs['pred_reg']: (Tensor) [B, M, 4]
            outputs['pred_ctn']: (Tensor) [B, M, 1]
            outputs['strides']: (List) [8, 16, 32, ...] stride of the model output
            targets: (List) [dict{'boxes': [...],
                                  'labels': [...],
                                  'orig_size': ...}, ...]
        """
        # -------------------- Pre-process --------------------
        device = outputs['pred_cls'][0].device
        fpn_strides = outputs['strides']
        anchors = outputs['anchors']
        pred_cls = torch.cat(outputs['pred_cls'], dim=1).view(-1, self.num_classes)
        pred_delta = torch.cat(outputs['pred_reg'], dim=1).view(-1, 4)
        pred_ctn = torch.cat(outputs['pred_ctn'], dim=1).view(-1, 1)
        masks = ~torch.cat(outputs['mask'], dim=1).view(-1)

        # -------------------- Label Assignment --------------------
        gt_classes, gt_deltas, gt_centerness = self.matcher(fpn_strides, anchors, targets)
        gt_classes = gt_classes.flatten().to(device)
        gt_deltas = gt_deltas.view(-1, 4).to(device)
        gt_centerness = gt_centerness.view(-1, 1).to(device)

        foreground_idxs = (gt_classes >= 0) & (gt_classes != self.num_classes)
        num_foreground = foreground_idxs.sum()
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_foreground)
        num_foreground = torch.clamp(num_foreground / get_world_size(), min=1).item()

        num_foreground_centerness = gt_centerness[foreground_idxs].sum()
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_foreground_centerness)
        num_targets = torch.clamp(num_foreground_centerness / get_world_size(), min=1).item()

        # -------------------- classification loss --------------------
        gt_classes_target = torch.zeros_like(pred_cls)
        gt_classes_target[foreground_idxs, gt_classes[foreground_idxs]] = 1
        valid_idxs = (gt_classes >= 0) & masks
        loss_labels = self.loss_labels(
            pred_cls[valid_idxs], gt_classes_target[valid_idxs], num_foreground)

        # -------------------- regression loss --------------------
        loss_bboxes = self.loss_bboxes_ltrb(
            pred_delta[foreground_idxs], gt_deltas[foreground_idxs],
            gt_centerness[foreground_idxs], num_targets)

        # -------------------- centerness loss --------------------
        loss_centerness = F.binary_cross_entropy_with_logits(
            pred_ctn[foreground_idxs], gt_centerness[foreground_idxs], reduction='none')
        loss_centerness = loss_centerness.sum() / num_foreground

        loss_dict = dict(
            loss_cls = loss_labels,
            loss_reg = loss_bboxes,
            loss_ctn = loss_centerness,
        )

        return loss_dict

    def ota_loss(self, outputs, targets):
        """
            outputs['pred_cls']: (Tensor) [B, M, C]
            outputs['pred_reg']: (Tensor) [B, M, 4]
            outputs['pred_box']: (Tensor) [B, M, 4]
            outputs['strides']: (List) [8, 16, 32, ...] stride of the model output
            targets: (List) [dict{'boxes': [...],
                                  'labels': [...],
                                  'orig_size': ...}, ...]
        """
        # -------------------- Pre-process --------------------
        bs = outputs['pred_cls'][0].shape[0]
        device = outputs['pred_cls'][0].device
        fpn_strides = outputs['strides']
        anchors = outputs['anchors']
        # preds: [B, M, C]
        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
        box_preds = torch.cat(outputs['pred_box'], dim=1)
        masks = ~torch.cat(outputs['mask'], dim=1).view(-1)

        # -------------------- Label Assignment --------------------
        cls_targets = []
        box_targets = []
        assign_metrics = []
        for batch_idx in range(bs):
            tgt_labels = targets[batch_idx]["labels"].to(device)  # [N,]
            tgt_bboxes = targets[batch_idx]["boxes"].to(device)   # [N, 4]
            # refine targets: drop boxes whose shorter side is below 8 pixels
            tgt_boxes_wh = tgt_bboxes[..., 2:] - tgt_bboxes[..., :2]
            min_tgt_size = torch.min(tgt_boxes_wh, dim=-1)[0]
            keep = (min_tgt_size >= 8)
            tgt_bboxes = tgt_bboxes[keep]
            tgt_labels = tgt_labels[keep]
            # label assignment
            assigned_result = self.matcher(fpn_strides=fpn_strides,
                                           anchors=anchors,
                                           pred_cls=cls_preds[batch_idx].detach(),
                                           pred_box=box_preds[batch_idx].detach(),
                                           gt_labels=tgt_labels,
                                           gt_bboxes=tgt_bboxes)
            cls_targets.append(assigned_result['assigned_labels'])
            box_targets.append(assigned_result['assigned_bboxes'])
            assign_metrics.append(assigned_result['assign_metrics'])

        # List[B, M, C] -> Tensor[BM, C]
        cls_targets = torch.cat(cls_targets, dim=0)
        box_targets = torch.cat(box_targets, dim=0)
        assign_metrics = torch.cat(assign_metrics, dim=0)

        valid_idxs = (cls_targets >= 0) & masks
        foreground_idxs = (cls_targets >= 0) & (cls_targets != self.num_classes)
        num_fgs = assign_metrics.sum()
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_fgs)
        num_fgs = torch.clamp(num_fgs / get_world_size(), min=1).item()

        # -------------------- classification loss --------------------
        cls_preds = cls_preds.view(-1, self.num_classes)[valid_idxs]
        qfl_targets = (cls_targets[valid_idxs], assign_metrics[valid_idxs])
        loss_labels = self.loss_labels_qfl(cls_preds, qfl_targets, 2.0, num_fgs)

        # -------------------- regression loss --------------------
        box_preds_pos = box_preds.view(-1, 4)[foreground_idxs]
        box_targets_pos = box_targets[foreground_idxs]
        loss_bboxes = self.loss_bboxes_xyxy(box_preds_pos, box_targets_pos, num_fgs)

        loss_dict = dict(
            loss_cls = loss_labels,
            loss_reg = loss_bboxes,
        )

        return loss_dict

    def forward(self, outputs, targets):
        """
            outputs['pred_cls']: (Tensor) [B, M, C]
            outputs['pred_reg']: (Tensor) [B, M, 4]
            outputs['pred_ctn']: (Tensor) [B, M, 1]
            outputs['strides']: (List) [8, 16, 32, ...] stride of the model output
            targets: (List) [dict{'boxes': [...],
                                  'labels': [...],
                                  'orig_size': ...}, ...]
        """
        if self.cfg.matcher == "fcos_matcher":
            return self.fcos_loss(outputs, targets)
        elif self.cfg.matcher == "simota":
            return self.ota_loss(outputs, targets)
        else:
            raise NotImplementedError


if __name__ == "__main__":
    pass
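    # ---------------- Usage sketch (illustrative only, not the project's config) ----------------
    # A minimal smoke test of the Quality Focal Loss branch. The SimpleNamespace config is a
    # hypothetical stand-in that only provides the fields read in __init__ ('simota' branch);
    # the dummy shapes below are made up. Run as a module (e.g. `python -m <package>.criterion`)
    # so the relative import of .matcher resolves. A full criterion(outputs, targets) call would
    # additionally need real model outputs ('pred_cls', 'pred_box', 'anchors', 'strides', 'mask').
    from types import SimpleNamespace

    cfg = SimpleNamespace(num_classes=20,
                          focal_loss_alpha=0.25,
                          focal_loss_gamma=2.0,
                          loss_cls_weight=1.0,
                          loss_reg_weight=2.0,
                          matcher='simota',
                          matcher_hpy={'soft_center_radius': 3.0,
                                       'topk_candidates': 13})
    criterion = SetCriterion(cfg)

    # Dummy per-anchor logits and assignments; label == num_classes denotes background.
    N, C = 8, cfg.num_classes
    pred_cls = torch.randn(N, C)
    labels = torch.randint(0, C + 1, (N,))
    scores = torch.rand(N)
    loss = criterion.loss_labels_qfl(pred_cls, (labels, scores), beta=2.0, num_boxes=float(N))
    print('qfl loss:', loss.item())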