# matcher.py

import math
import torch
import torch.nn.functional as F
from utils.box_ops import *


@torch.no_grad()
def get_ious_and_iou_loss(inputs,
                          targets,
                          weight=None,
                          box_mode="xyxy",
                          loss_type="iou",
                          reduction="none"):
    """
    Compute IoU loss of type ['iou', 'giou', 'linear_iou'].
    Args:
        inputs (tensor): predicted boxes
        targets (tensor): target boxes
        weight (tensor): loss weight
        box_mode (str): box format of inputs/targets, 'xyxy' or 'ltrb'
        loss_type (str): 'iou', 'giou' or 'linear_iou'
        reduction (str): reduction manner, 'none', 'mean' or 'sum'
    Returns:
        ious (tensor): IoU between inputs and targets.
        loss (tensor): computed IoU loss.
    """
    if box_mode == "ltrb":
        # convert (l, t, r, b) distances to xyxy-style offsets around the anchor
        inputs = torch.cat((-inputs[..., :2], inputs[..., 2:]), dim=-1)
        targets = torch.cat((-targets[..., :2], targets[..., 2:]), dim=-1)
    elif box_mode != "xyxy":
        raise NotImplementedError

    eps = torch.finfo(torch.float32).eps

    inputs_area = (inputs[..., 2] - inputs[..., 0]).clamp_(min=0) \
        * (inputs[..., 3] - inputs[..., 1]).clamp_(min=0)
    targets_area = (targets[..., 2] - targets[..., 0]).clamp_(min=0) \
        * (targets[..., 3] - targets[..., 1]).clamp_(min=0)

    w_intersect = (torch.min(inputs[..., 2], targets[..., 2])
                   - torch.max(inputs[..., 0], targets[..., 0])).clamp_(min=0)
    h_intersect = (torch.min(inputs[..., 3], targets[..., 3])
                   - torch.max(inputs[..., 1], targets[..., 1])).clamp_(min=0)

    area_intersect = w_intersect * h_intersect
    area_union = targets_area + inputs_area - area_intersect
    ious = area_intersect / area_union.clamp(min=eps)

    if loss_type == "iou":
        loss = -ious.clamp(min=eps).log()
    elif loss_type == "linear_iou":
        loss = 1 - ious
    elif loss_type == "giou":
        # smallest enclosing box of the two boxes
        g_w_intersect = torch.max(inputs[..., 2], targets[..., 2]) \
            - torch.min(inputs[..., 0], targets[..., 0])
        g_h_intersect = torch.max(inputs[..., 3], targets[..., 3]) \
            - torch.min(inputs[..., 1], targets[..., 1])
        ac_union = g_w_intersect * g_h_intersect
        gious = ious - (ac_union - area_union) / ac_union.clamp(min=eps)
        loss = 1 - gious
    else:
        raise NotImplementedError

    if weight is not None:
        loss = loss * weight.view(loss.size())
        if reduction == "mean":
            loss = loss.sum() / max(weight.sum().item(), eps)
    else:
        if reduction == "mean":
            loss = loss.mean()
        if reduction == "sum":
            loss = loss.sum()

    return ious, loss
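

# Illustrative usage sketch (not part of the original file): computing a GIoU
# loss for a couple of made-up xyxy boxes. The tensor values and the helper
# name `_demo_iou_loss` are hypothetical and only meant to show the call shape.
def _demo_iou_loss():
    pred_boxes = torch.tensor([[0., 0., 4., 4.],
                               [1., 1., 3., 3.]])
    gt_boxes = torch.tensor([[0., 0., 4., 4.],
                             [0., 0., 2., 2.]])
    # ious: [2] per-row IoU; loss: scalar mean GIoU loss over the two pairs
    ious, loss = get_ious_and_iou_loss(pred_boxes, gt_boxes,
                                       loss_type="giou", reduction="mean")
    return ious, loss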


class FcosMatcher(object):
    """
    This code is referenced from https://github.com/Megvii-BaseDetection/cvpods
    """
    def __init__(self,
                 num_classes,
                 center_sampling_radius,
                 object_sizes_of_interest,
                 box_weights=[1, 1, 1, 1]):
        self.num_classes = num_classes
        self.center_sampling_radius = center_sampling_radius
        self.object_sizes_of_interest = object_sizes_of_interest
        self.box_weights = box_weights

    def get_deltas(self, anchors, boxes):
        """
        Get box regression transformation deltas (dl, dt, dr, db) that can be used
        to transform the `anchors` into the `boxes`. That is, the relation
        ``boxes == self.apply_deltas(deltas, anchors)`` is true.
        Args:
            anchors (Tensor): anchors, e.g., feature map coordinates
            boxes (Tensor): target of the transformation, e.g., ground-truth boxes.
        """
        assert isinstance(anchors, torch.Tensor), type(anchors)
        assert isinstance(boxes, torch.Tensor), type(boxes)
        deltas = torch.cat((anchors - boxes[..., :2], boxes[..., 2:] - anchors),
                           dim=-1) * anchors.new_tensor(self.box_weights)
        return deltas

    @torch.no_grad()
    def __call__(self, fpn_strides, anchors, targets):
        """
        fpn_strides: (List) List[8, 16, 32, ...], strides of the network outputs.
        anchors: (List of Tensor) List[F, M, 2], F = num_fpn_levels
        targets: (List of Dict) each dict{'boxes': [...],
                                          'labels': [...],
                                          'orig_size': ...}
        """
        gt_classes = []
        gt_anchors_deltas = []
        gt_centerness = []
        device = anchors[0].device

        # List[F, M, 2] -> [M, 2]
        anchors_over_all_feature_maps = torch.cat(anchors, dim=0).to(device)

        for targets_per_image in targets:
            # generate object_sizes_of_interest: List[[M, 2]]
            object_sizes_of_interest = [
                anchors_i.new_tensor(scale_range).unsqueeze(0).expand(anchors_i.size(0), -1)
                for anchors_i, scale_range in zip(anchors, self.object_sizes_of_interest)]
            # List[F, M, 2] -> [M, 2], M = M1 + M2 + ... + MF
            object_sizes_of_interest = torch.cat(object_sizes_of_interest, dim=0)
            # [N, 4]
            tgt_box = targets_per_image['boxes'].to(device)
            # [N,]
            tgt_cls = targets_per_image['labels'].to(device)
            # [N, M, 4], M = M1 + M2 + ... + MF
            deltas = self.get_deltas(anchors_over_all_feature_maps, tgt_box.unsqueeze(1))

            has_gt = (len(tgt_cls) > 0)
            if has_gt:
                if self.center_sampling_radius > 0:
                    # bbox centers: [N, 2]
                    centers = (tgt_box[..., :2] + tgt_box[..., 2:]) * 0.5

                    is_in_boxes = []
                    for stride, anchors_i in zip(fpn_strides, anchors):
                        radius = stride * self.center_sampling_radius
                        # sampling boxes around the gt centers, clipped to the gt boxes: [N, 4]
                        center_boxes = torch.cat((
                            torch.max(centers - radius, tgt_box[:, :2]),
                            torch.min(centers + radius, tgt_box[:, 2:]),
                        ), dim=-1)
                        # [N, Mi, 4]
                        center_deltas = self.get_deltas(anchors_i, center_boxes.unsqueeze(1))
                        # [N, Mi]
                        is_in_boxes.append(center_deltas.min(dim=-1).values > 0)
                    # [N, M], M = M1 + M2 + ... + MF
                    is_in_boxes = torch.cat(is_in_boxes, dim=1)
                else:
                    # no center sampling: use all the locations within a ground-truth box
                    # [N, M], M = M1 + M2 + ... + MF
                    is_in_boxes = deltas.min(dim=-1).values > 0

                # [N, M], M = M1 + M2 + ... + MF
                max_deltas = deltas.max(dim=-1).values
                # limit the regression range for each location
                is_cared_in_the_level = \
                    (max_deltas >= object_sizes_of_interest[None, :, 0]) & \
                    (max_deltas <= object_sizes_of_interest[None, :, 1])

                # [N,]
                tgt_box_area = (tgt_box[:, 2] - tgt_box[:, 0]) * (tgt_box[:, 3] - tgt_box[:, 1])
                # [N,] -> [N, 1] -> [N, M]
                gt_positions_area = tgt_box_area.unsqueeze(1).repeat(
                    1, anchors_over_all_feature_maps.size(0))
                gt_positions_area[~is_in_boxes] = math.inf
                gt_positions_area[~is_cared_in_the_level] = math.inf

                # if there is still more than one object for a position,
                # we choose the one with minimal area
                # [M,], each element is the index of a ground-truth box
                positions_min_area, gt_matched_idxs = gt_positions_area.min(dim=0)

                # ground-truth box regression targets
                # [M, 4]
                gt_anchors_reg_deltas_i = self.get_deltas(
                    anchors_over_all_feature_maps, tgt_box[gt_matched_idxs])

                # [M,]
                tgt_cls_i = tgt_cls[gt_matched_idxs]
                # anchors with area inf are treated as background
                tgt_cls_i[positions_min_area == math.inf] = self.num_classes

                # ground-truth centerness
                left_right = gt_anchors_reg_deltas_i[:, [0, 2]]
                top_bottom = gt_anchors_reg_deltas_i[:, [1, 3]]
                # [M,]
                gt_centerness_i = torch.sqrt(
                    (left_right.min(dim=-1).values / left_right.max(dim=-1).values).clamp_(min=0)
                    * (top_bottom.min(dim=-1).values / top_bottom.max(dim=-1).values).clamp_(min=0)
                )

                gt_classes.append(tgt_cls_i)
                gt_anchors_deltas.append(gt_anchors_reg_deltas_i)
                gt_centerness.append(gt_centerness_i)

                # the center-sampling temporaries only exist when center sampling is enabled
                if self.center_sampling_radius > 0:
                    del centers, center_boxes, center_deltas
                del deltas, max_deltas
            else:
                tgt_cls_i = torch.zeros(anchors_over_all_feature_maps.shape[0], device=device) + self.num_classes
                gt_anchors_reg_deltas_i = torch.zeros([anchors_over_all_feature_maps.shape[0], 4], device=device)
                gt_centerness_i = torch.zeros(anchors_over_all_feature_maps.shape[0], device=device)
                gt_classes.append(tgt_cls_i.long())
                gt_anchors_deltas.append(gt_anchors_reg_deltas_i.float())
                gt_centerness.append(gt_centerness_i.float())

        # [B, M], [B, M, 4], [B, M]
        return torch.stack(gt_classes), torch.stack(gt_anchors_deltas), torch.stack(gt_centerness)
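

# Illustrative usage sketch (not part of the original file): one way FcosMatcher
# could be driven. The strides, size ranges, anchor grids, class count, sampling
# radius, and targets below are made-up placeholders; in the real detector the
# anchor points come from the FPN feature maps, not from this file.
def _demo_fcos_matcher():
    fpn_strides = [8, 16, 32]
    object_sizes_of_interest = [[-1, 64], [64, 128], [128, math.inf]]
    matcher = FcosMatcher(num_classes=80,
                          center_sampling_radius=1.5,
                          object_sizes_of_interest=object_sizes_of_interest,
                          box_weights=[1, 1, 1, 1])
    # fake anchor points: [Mi, 2] (x, y) locations per FPN level
    anchors = [torch.rand(100, 2) * 800,
               torch.rand(25, 2) * 800,
               torch.rand(9, 2) * 800]
    # one image worth of targets: xyxy boxes and integer class labels
    targets = [{'boxes': torch.tensor([[10., 10., 120., 200.]]),
                'labels': torch.tensor([3])}]
    gt_classes, gt_deltas, gt_centerness = matcher(fpn_strides, anchors, targets)
    # gt_classes: [B, M], gt_deltas: [B, M, 4], gt_centerness: [B, M]
    return gt_classes, gt_deltas, gt_centerness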