# ---------------------------------------------------------------------
# Copyright (c) Megvii Inc. All rights reserved.
# ---------------------------------------------------------------------
import math

import torch
import torch.nn.functional as F

from utils.box_ops import *


@torch.no_grad()
def get_ious_and_iou_loss(inputs,
                          targets,
                          weight=None,
                          box_mode="xyxy",
                          loss_type="iou",
                          reduction="none"):
    """
    Compute the IoU loss of type ['iou', 'giou', 'linear_iou'].

    Args:
        inputs (tensor): predicted values
        targets (tensor): target values
        weight (tensor): loss weight
        box_mode (str): 'xyxy' or 'ltrb'
        loss_type (str): 'iou', 'linear_iou' or 'giou'
        reduction (str): reduction manner

    Returns:
        ious (tensor): computed ious.
        loss (tensor): computed iou loss.
    """
    if box_mode == "ltrb":
        inputs = torch.cat((-inputs[..., :2], inputs[..., 2:]), dim=-1)
        targets = torch.cat((-targets[..., :2], targets[..., 2:]), dim=-1)
    elif box_mode != "xyxy":
        raise NotImplementedError

    eps = torch.finfo(torch.float32).eps

    inputs_area = (inputs[..., 2] - inputs[..., 0]).clamp_(min=0) \
        * (inputs[..., 3] - inputs[..., 1]).clamp_(min=0)
    targets_area = (targets[..., 2] - targets[..., 0]).clamp_(min=0) \
        * (targets[..., 3] - targets[..., 1]).clamp_(min=0)

    w_intersect = (torch.min(inputs[..., 2], targets[..., 2])
                   - torch.max(inputs[..., 0], targets[..., 0])).clamp_(min=0)
    h_intersect = (torch.min(inputs[..., 3], targets[..., 3])
                   - torch.max(inputs[..., 1], targets[..., 1])).clamp_(min=0)

    area_intersect = w_intersect * h_intersect
    area_union = targets_area + inputs_area - area_intersect
    ious = area_intersect / area_union.clamp(min=eps)

    if loss_type == "iou":
        loss = -ious.clamp(min=eps).log()
    elif loss_type == "linear_iou":
        loss = 1 - ious
    elif loss_type == "giou":
        g_w_intersect = torch.max(inputs[..., 2], targets[..., 2]) \
            - torch.min(inputs[..., 0], targets[..., 0])
        g_h_intersect = torch.max(inputs[..., 3], targets[..., 3]) \
            - torch.min(inputs[..., 1], targets[..., 1])
        # area of the smallest enclosing box
        ac_union = g_w_intersect * g_h_intersect
        gious = ious - (ac_union - area_union) / ac_union.clamp(min=eps)
        loss = 1 - gious
    else:
        raise NotImplementedError

    if weight is not None:
        loss = loss * weight.view(loss.size())
        if reduction == "mean":
            loss = loss.sum() / max(weight.sum().item(), eps)
    else:
        if reduction == "mean":
            loss = loss.mean()
        if reduction == "sum":
            loss = loss.sum()

    return ious, loss
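
# Illustrative usage sketch, not part of the original file. The (l, t, r, b)
# regression values and weights below are made up purely to show the call pattern.
def _iou_loss_usage_example():
    # two locations with predicted and target (l, t, r, b) distances
    pred_deltas = torch.tensor([[1.0, 1.0, 2.0, 2.0],
                                [0.5, 0.5, 0.5, 0.5]])
    tgt_deltas = torch.tensor([[1.0, 1.5, 2.0, 1.5],
                               [0.5, 0.5, 0.5, 0.5]])
    weight = torch.tensor([1.0, 1.0])
    ious, loss = get_ious_and_iou_loss(pred_deltas, tgt_deltas, weight=weight,
                                       box_mode="ltrb", loss_type="giou",
                                       reduction="mean")
    return ious, loss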

class FcosMatcher(object):
    """
    This code is referenced from https://github.com/Megvii-BaseDetection/cvpods
    """
    def __init__(self,
                 num_classes,
                 center_sampling_radius,
                 object_sizes_of_interest,
                 box_weights=[1, 1, 1, 1]):
        self.num_classes = num_classes
        self.center_sampling_radius = center_sampling_radius
        self.object_sizes_of_interest = object_sizes_of_interest
        self.box_weights = box_weights

    def get_deltas(self, anchors, boxes):
        """
        Get box regression transformation deltas (dl, dt, dr, db) that can be used
        to transform the `anchors` into the `boxes`. That is, the relation
        ``boxes == self.apply_deltas(deltas, anchors)`` is true.

        Args:
            anchors (Tensor): anchors, e.g., feature map coordinates
            boxes (Tensor): target of the transformation, e.g., ground-truth boxes.
        """
        assert isinstance(anchors, torch.Tensor), type(anchors)
        assert isinstance(boxes, torch.Tensor), type(boxes)
        deltas = torch.cat((anchors - boxes[..., :2], boxes[..., 2:] - anchors),
                           dim=-1) * anchors.new_tensor(self.box_weights)
        return deltas
    @torch.no_grad()
    def __call__(self, fpn_strides, anchors, targets):
        """
        fpn_strides: (List) e.g. [8, 16, 32, ...], the stride of each pyramid level.
        anchors: (List of Tensor) List[F, M, 2], F = num_fpn_levels
        targets: (List of Dict) one dict per image with keys {'boxes': [...],
                                                              'labels': [...],
                                                              'orig_size': ...}
        """
        gt_classes = []
        gt_anchors_deltas = []
        gt_centerness = []
        device = anchors[0].device

        # List[F, M, 2] -> [M, 2]
        anchors_over_all_feature_maps = torch.cat(anchors, dim=0).to(device)

        for targets_per_image in targets:
            # generate object_sizes_of_interest: List[[M, 2]]
            object_sizes_of_interest = [
                anchors_i.new_tensor(scale_range).unsqueeze(0).expand(anchors_i.size(0), -1)
                for anchors_i, scale_range in zip(anchors, self.object_sizes_of_interest)]
            # List[F, M, 2] -> [M, 2], M = M1 + M2 + ... + MF
            object_sizes_of_interest = torch.cat(object_sizes_of_interest, dim=0)
            # [N, 4]
            tgt_box = targets_per_image['boxes'].to(device)
            # [N,]
            tgt_cls = targets_per_image['labels'].to(device)
            # [N, M, 4], M = M1 + M2 + ... + MF
            deltas = self.get_deltas(anchors_over_all_feature_maps, tgt_box.unsqueeze(1))

            has_gt = (len(tgt_cls) > 0)
            if has_gt:
                if self.center_sampling_radius > 0:
                    # bbox centers: [N, 2]
                    centers = (tgt_box[..., :2] + tgt_box[..., 2:]) * 0.5

                    is_in_boxes = []
                    for stride, anchors_i in zip(fpn_strides, anchors):
                        radius = stride * self.center_sampling_radius
                        # [N, 4]
                        center_boxes = torch.cat((
                            torch.max(centers - radius, tgt_box[:, :2]),
                            torch.min(centers + radius, tgt_box[:, 2:]),
                        ), dim=-1)
                        # [N, Mi, 4]
                        center_deltas = self.get_deltas(anchors_i, center_boxes.unsqueeze(1))
                        # [N, Mi]
                        is_in_boxes.append(center_deltas.min(dim=-1).values > 0)
                    # [N, M], M = M1 + M2 + ... + MF
                    is_in_boxes = torch.cat(is_in_boxes, dim=1)
                else:
                    # no center sampling: use all locations inside a ground-truth box
                    # [N, M], M = M1 + M2 + ... + MF
                    is_in_boxes = deltas.min(dim=-1).values > 0

                # [N, M], M = M1 + M2 + ... + MF
                max_deltas = deltas.max(dim=-1).values
                # limit the regression range for each location
                is_cared_in_the_level = \
                    (max_deltas >= object_sizes_of_interest[None, :, 0]) & \
                    (max_deltas <= object_sizes_of_interest[None, :, 1])

                # [N,]
                tgt_box_area = (tgt_box[:, 2] - tgt_box[:, 0]) * (tgt_box[:, 3] - tgt_box[:, 1])
                # [N,] -> [N, 1] -> [N, M]
                gt_positions_area = tgt_box_area.unsqueeze(1).repeat(
                    1, anchors_over_all_feature_maps.size(0))
                gt_positions_area[~is_in_boxes] = math.inf
                gt_positions_area[~is_cared_in_the_level] = math.inf

                # if more than one object remains for a position,
                # choose the one with the minimal area
                # [M,], each element is the index of the matched ground-truth
                positions_min_area, gt_matched_idxs = gt_positions_area.min(dim=0)

                # ground-truth box regression
                # [M, 4]
                gt_anchors_reg_deltas_i = self.get_deltas(
                    anchors_over_all_feature_maps, tgt_box[gt_matched_idxs])

                # [M,]
                tgt_cls_i = tgt_cls[gt_matched_idxs]
                # anchors with area inf are treated as background.
                tgt_cls_i[positions_min_area == math.inf] = self.num_classes

                # ground-truth centerness
                left_right = gt_anchors_reg_deltas_i[:, [0, 2]]
                top_bottom = gt_anchors_reg_deltas_i[:, [1, 3]]
                # [M,]
                gt_centerness_i = torch.sqrt(
                    (left_right.min(dim=-1).values / left_right.max(dim=-1).values).clamp_(min=0)
                    * (top_bottom.min(dim=-1).values / top_bottom.max(dim=-1).values).clamp_(min=0)
                )

                gt_classes.append(tgt_cls_i)
                gt_anchors_deltas.append(gt_anchors_reg_deltas_i)
                gt_centerness.append(gt_centerness_i)

                del deltas, max_deltas
                if self.center_sampling_radius > 0:
                    # these temporaries only exist when center sampling is enabled
                    del centers, center_boxes, center_deltas
            else:
                tgt_cls_i = torch.zeros(anchors_over_all_feature_maps.shape[0], device=device) + self.num_classes
                gt_anchors_reg_deltas_i = torch.zeros([anchors_over_all_feature_maps.shape[0], 4], device=device)
                gt_centerness_i = torch.zeros(anchors_over_all_feature_maps.shape[0], device=device)

                gt_classes.append(tgt_cls_i.long())
                gt_anchors_deltas.append(gt_anchors_reg_deltas_i.float())
                gt_centerness.append(gt_centerness_i.float())

        # [B, M], [B, M, 4], [B, M]
        return torch.stack(gt_classes), torch.stack(gt_anchors_deltas), torch.stack(gt_centerness)
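
# Illustrative usage sketch, not part of the original file. The anchor layout,
# strides, and targets below are made-up assumptions, chosen only to show the
# expected argument structure of FcosMatcher.__call__.
def _fcos_matcher_usage_example():
    matcher = FcosMatcher(num_classes=80,
                          center_sampling_radius=1.5,
                          object_sizes_of_interest=[[-1, 64], [64, 128], [128, float('inf')]])
    fpn_strides = [8, 16, 32]
    # one (x, y) location tensor per pyramid level: List[F][Mi, 2]
    anchors = [torch.rand(64, 2) * 512,
               torch.rand(16, 2) * 512,
               torch.rand(4, 2) * 512]
    # one dict per image, boxes in xyxy image coordinates
    targets = [{'boxes': torch.tensor([[50., 60., 200., 180.]]),
                'labels': torch.tensor([3])}]
    # gt_classes: [B, M], gt_deltas: [B, M, 4], gt_centerness: [B, M]
    gt_classes, gt_deltas, gt_centerness = matcher(fpn_strides, anchors, targets)
    return gt_classes, gt_deltas, gt_centerness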

class SimOtaMatcher(object):
    def __init__(self, num_classes, soft_center_radius=3.0, topk_candidates=13):
        self.num_classes = num_classes
        self.soft_center_radius = soft_center_radius
        self.topk_candidates = topk_candidates

    @torch.no_grad()
    def __call__(self,
                 fpn_strides,
                 anchors,
                 pred_cls,
                 pred_box,
                 gt_labels,
                 gt_bboxes):
        # [M,]
        strides = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
                             for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
        num_gt = len(gt_labels)
        # List[F, M, 2] -> [M, 2]
        anchors = torch.cat(anchors, dim=0)

        # check gt
        if num_gt == 0 or gt_bboxes.max().item() == 0.:
            return {
                'assigned_labels': gt_labels.new_full(pred_cls[..., 0].shape,
                                                      self.num_classes,
                                                      dtype=torch.long),
                'assigned_bboxes': gt_bboxes.new_full(pred_box.shape, 0),
                'assign_metrics': gt_bboxes.new_full(pred_cls[..., 0].shape, 0)
            }

        # get inside points: [N, M]
        is_in_gt = self.find_inside_points(gt_bboxes, anchors)
        valid_mask = is_in_gt.sum(dim=0) > 0  # [M,]

        # ----------------------------------- soft center prior -----------------------------------
        gt_center = (gt_bboxes[..., :2] + gt_bboxes[..., 2:]) / 2.0
        distance = (anchors.unsqueeze(0) - gt_center.unsqueeze(1)
                    ).pow(2).sum(-1).sqrt() / strides.unsqueeze(0)  # [N, M]
        distance = distance * valid_mask.unsqueeze(0)
        soft_center_prior = torch.pow(10, distance - self.soft_center_radius)

        # ----------------------------------- regression cost -----------------------------------
        pair_wise_ious, _ = box_iou(gt_bboxes, pred_box)  # [N, M]
        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8) * 3.0

        # ----------------------------------- classification cost -----------------------------------
        ## select the predicted scores corresponding to the gt_labels
        pairwise_pred_scores = pred_cls.permute(1, 0)  # [M, C] -> [C, M]
        pairwise_pred_scores = pairwise_pred_scores[gt_labels.long(), :].float()  # [N, M]
        ## scale factor
        scale_factor = (pair_wise_ious - pairwise_pred_scores.sigmoid()).abs().pow(2.0)
        ## cls cost
        pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
            pairwise_pred_scores, pair_wise_ious,
            reduction="none") * scale_factor  # [N, M]
        del pairwise_pred_scores

        ## foreground cost matrix
        cost_matrix = pair_wise_cls_loss + pair_wise_ious_loss + soft_center_prior
        max_pad_value = torch.ones_like(cost_matrix) * 1e9
        cost_matrix = torch.where(valid_mask[None].repeat(num_gt, 1),  # [N, M]
                                  cost_matrix, max_pad_value)

        # ----------------------------------- dynamic label assignment -----------------------------------
        matched_pred_ious, matched_gt_inds, fg_mask_inboxes = self.dynamic_k_matching(
            cost_matrix, pair_wise_ious, num_gt)
        del pair_wise_cls_loss, cost_matrix, pair_wise_ious, pair_wise_ious_loss

        # ----------------------------------- process assigned labels -----------------------------------
        assigned_labels = gt_labels.new_full(pred_cls[..., 0].shape,
                                             self.num_classes)  # [M,]
        assigned_labels[fg_mask_inboxes] = gt_labels[matched_gt_inds].squeeze(-1)
        assigned_labels = assigned_labels.long()  # [M,]

        assigned_bboxes = gt_bboxes.new_full(pred_box.shape, 0)  # [M, 4]
        assigned_bboxes[fg_mask_inboxes] = gt_bboxes[matched_gt_inds]  # [M, 4]

        assign_metrics = gt_bboxes.new_full(pred_cls[..., 0].shape, 0)  # [M,]
        assign_metrics[fg_mask_inboxes] = matched_pred_ious  # [M,]

        assigned_dict = dict(
            assigned_labels=assigned_labels,
            assigned_bboxes=assigned_bboxes,
            assign_metrics=assign_metrics
        )

        return assigned_dict
    def find_inside_points(self, gt_bboxes, anchors):
        """
        gt_bboxes: Tensor -> [N, 4]
        anchors:   Tensor -> [M, 2]
        """
        num_anchors = anchors.shape[0]
        num_gt = gt_bboxes.shape[0]

        anchors_expand = anchors.unsqueeze(0).repeat(num_gt, 1, 1)           # [N, M, 2]
        gt_bboxes_expand = gt_bboxes.unsqueeze(1).repeat(1, num_anchors, 1)  # [N, M, 4]

        # offsets from each anchor point to the gt box borders
        lt = anchors_expand - gt_bboxes_expand[..., :2]
        rb = gt_bboxes_expand[..., 2:] - anchors_expand
        bbox_deltas = torch.cat([lt, rb], dim=-1)

        is_in_gts = bbox_deltas.min(dim=-1).values > 0

        return is_in_gts
    def dynamic_k_matching(self, cost_matrix, pairwise_ious, num_gt):
        """Use IoU and matching cost to calculate the dynamic top-k positive
        targets.

        Args:
            cost_matrix (Tensor): Cost matrix.
            pairwise_ious (Tensor): Pairwise iou matrix.
            num_gt (int): Number of gt.

        Returns:
            tuple: matched ious, matched gt indexes and the foreground mask.
        """
        matching_matrix = torch.zeros_like(cost_matrix, dtype=torch.uint8)
        # select candidate topk ious for dynamic-k calculation
        candidate_topk = min(self.topk_candidates, pairwise_ious.size(1))
        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=1)
        # calculate dynamic k for each gt
        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)

        # sorting the batch cost matrix is faster than topk
        _, sorted_indices = torch.sort(cost_matrix, dim=1)
        for gt_idx in range(num_gt):
            topk_ids = sorted_indices[gt_idx, :dynamic_ks[gt_idx]]
            matching_matrix[gt_idx, :][topk_ids] = 1
        del topk_ious, dynamic_ks, topk_ids

        # resolve anchors matched to more than one gt: keep the lowest-cost match
        prior_match_gt_mask = matching_matrix.sum(0) > 1
        if prior_match_gt_mask.sum() > 0:
            cost_min, cost_argmin = torch.min(
                cost_matrix[:, prior_match_gt_mask], dim=0)
            matching_matrix[:, prior_match_gt_mask] *= 0
            matching_matrix[cost_argmin, prior_match_gt_mask] = 1

        # get the foreground mask inside the box and center prior
        fg_mask_inboxes = matching_matrix.sum(0) > 0
        matched_pred_ious = (matching_matrix *
                             pairwise_ious).sum(0)[fg_mask_inboxes]
        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)

        return matched_pred_ious, matched_gt_inds, fg_mask_inboxes
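
# Illustrative sketch of the dynamic-k assignment step in isolation, not part of
# the original file. The IoU and cost matrices are tiny made-up values (2 ground
# truths vs. 4 candidate anchors) chosen only to show the call pattern.
def _dynamic_k_matching_example():
    matcher = SimOtaMatcher(num_classes=80, topk_candidates=2)
    pairwise_ious = torch.tensor([[0.8, 0.1, 0.6, 0.0],
                                  [0.1, 0.7, 0.2, 0.3]])
    # a simple IoU-based cost; the real __call__ also adds cls and center-prior costs
    cost_matrix = -torch.log(pairwise_ious + 1e-8)
    matched_pred_ious, matched_gt_inds, fg_mask_inboxes = matcher.dynamic_k_matching(
        cost_matrix, pairwise_ious, num_gt=2)
    return matched_pred_ious, matched_gt_inds, fg_mask_inboxes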