import math

import torch
import torch.nn.functional as F
from utils.box_ops import *


def get_ious_and_iou_loss(inputs,
                          targets,
                          weight=None,
                          box_mode="xyxy",
                          loss_type="iou",
                          reduction="none"):
- """
- Compute iou loss of type ['iou', 'giou', 'linear_iou']
- Args:
- inputs (tensor): pred values
- targets (tensor): target values
- weight (tensor): loss weight
- box_mode (str): 'xyxy' or 'ltrb', 'ltrb' is currently supported.
- loss_type (str): 'giou' or 'iou' or 'linear_iou'
- reduction (str): reduction manner
- Returns:
- loss (tensor): computed iou loss.
- """
    if box_mode == "ltrb":
        # (l, t, r, b) are positive distances from the anchor point; negating
        # (l, t) turns them into (x1, y1) offsets, i.e. 'xyxy' boxes.
        inputs = torch.cat((-inputs[..., :2], inputs[..., 2:]), dim=-1)
        targets = torch.cat((-targets[..., :2], targets[..., 2:]), dim=-1)
    elif box_mode != "xyxy":
        raise NotImplementedError

    eps = torch.finfo(torch.float32).eps

    inputs_area = (inputs[..., 2] - inputs[..., 0]).clamp_(min=0) \
        * (inputs[..., 3] - inputs[..., 1]).clamp_(min=0)
    targets_area = (targets[..., 2] - targets[..., 0]).clamp_(min=0) \
        * (targets[..., 3] - targets[..., 1]).clamp_(min=0)

    w_intersect = (torch.min(inputs[..., 2], targets[..., 2])
                   - torch.max(inputs[..., 0], targets[..., 0])).clamp_(min=0)
    h_intersect = (torch.min(inputs[..., 3], targets[..., 3])
                   - torch.max(inputs[..., 1], targets[..., 1])).clamp_(min=0)

    area_intersect = w_intersect * h_intersect
    area_union = targets_area + inputs_area - area_intersect
    ious = area_intersect / area_union.clamp(min=eps)
    if loss_type == "iou":
        loss = -ious.clamp(min=eps).log()
    elif loss_type == "linear_iou":
        loss = 1 - ious
    elif loss_type == "giou":
        # smallest enclosing box of each (input, target) pair
        g_w_intersect = torch.max(inputs[..., 2], targets[..., 2]) \
            - torch.min(inputs[..., 0], targets[..., 0])
        g_h_intersect = torch.max(inputs[..., 3], targets[..., 3]) \
            - torch.min(inputs[..., 1], targets[..., 1])
        ac_union = g_w_intersect * g_h_intersect
        # giou = iou - (area_enclosing - area_union) / area_enclosing
        gious = ious - (ac_union - area_union) / ac_union.clamp(min=eps)
        loss = 1 - gious
    else:
        raise NotImplementedError
    if weight is not None:
        loss = loss * weight.view(loss.size())
        if reduction == "mean":
            loss = loss.sum() / max(weight.sum().item(), eps)
    else:
        if reduction == "mean":
            loss = loss.mean()
        elif reduction == "sum":
            loss = loss.sum()

    return ious, loss
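

# A minimal, hand-checked usage sketch for get_ious_and_iou_loss (the boxes
# are made up for illustration; expected values follow from the IoU definition):
#
#   pred = torch.tensor([[0., 0., 2., 2.]])
#   tgt = torch.tensor([[1., 1., 3., 3.]])
#   ious, loss = get_ious_and_iou_loss(pred, tgt, loss_type="linear_iou")
#   # intersection = 1x1 = 1, union = 4 + 4 - 1 = 7,
#   # so ious ~ tensor([0.1429]) and loss = 1 - 1/7 ~ tensor([0.8571])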


class FcosMatcher(object):
    """
    FCOS label assigner.
    This code is adapted from https://github.com/Megvii-BaseDetection/cvpods
    """
    def __init__(self,
                 num_classes,
                 center_sampling_radius,
                 object_sizes_of_interest,
                 box_weights=[1, 1, 1, 1]):
        self.num_classes = num_classes
        self.center_sampling_radius = center_sampling_radius
        self.object_sizes_of_interest = object_sizes_of_interest
        self.box_weights = box_weights
    def get_deltas(self, anchors, boxes):
        """
        Get the box regression transformation deltas (dl, dt, dr, db) that
        transform the anchor points `anchors` into the boxes `boxes`, i.e. the
        left/top/right/bottom distances from each anchor point to the box sides.

        Args:
            anchors (Tensor): anchor points, e.g., feature map coordinates
            boxes (Tensor): target of the transformation, e.g., ground-truth
                boxes.
        """
        assert isinstance(anchors, torch.Tensor), type(anchors)
        assert isinstance(boxes, torch.Tensor), type(boxes)
        deltas = torch.cat((anchors - boxes[..., :2], boxes[..., 2:] - anchors),
                           dim=-1) * anchors.new_tensor(self.box_weights)
        return deltas
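
    # Example (a sketch with made-up numbers): for an anchor point (50, 50)
    # and ground-truth box (30, 40, 80, 90), unit box_weights give
    # (dl, dt, dr, db) = (50 - 30, 50 - 40, 80 - 50, 90 - 50) = (20, 10, 30, 40).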
    @torch.no_grad()
    def __call__(self, fpn_strides, anchors, targets):
        """
        Args:
            fpn_strides: (List[int]) strides of the network outputs, e.g., [8, 16, 32].
            anchors: (List[Tensor]) F tensors of shape [Mi, 2], F = num_fpn_levels.
            targets: (List[Dict]) per-image annotations, each a dict
                     {'boxes': [N, 4], 'labels': [N,], 'orig_size': ...}
        """
        gt_classes = []
        gt_anchors_deltas = []
        gt_centerness = []
        device = anchors[0].device
        # List[F, [Mi, 2]] -> [M, 2], M = M1 + M2 + ... + MF
        anchors_over_all_feature_maps = torch.cat(anchors, dim=0).to(device)

        for targets_per_image in targets:
            # generate object_sizes_of_interest: List[[Mi, 2]]
            object_sizes_of_interest = [
                anchors_i.new_tensor(scale_range).unsqueeze(0).expand(anchors_i.size(0), -1)
                for anchors_i, scale_range in zip(anchors, self.object_sizes_of_interest)]
            # List[[Mi, 2]] -> [M, 2], M = M1 + M2 + ... + MF
            object_sizes_of_interest = torch.cat(object_sizes_of_interest, dim=0)
            # [N, 4]
            tgt_box = targets_per_image['boxes'].to(device)
            # [N,]
            tgt_cls = targets_per_image['labels'].to(device)
            # [N, M, 4]
            deltas = self.get_deltas(anchors_over_all_feature_maps, tgt_box.unsqueeze(1))

            has_gt = (len(tgt_cls) > 0)
            if has_gt:
                if self.center_sampling_radius > 0:
                    # bbox centers: [N, 2]
                    centers = (tgt_box[..., :2] + tgt_box[..., 2:]) * 0.5

                    is_in_boxes = []
                    for stride, anchors_i in zip(fpn_strides, anchors):
                        radius = stride * self.center_sampling_radius
                        # sampling region: a (2 * radius)-sided square around
                        # each center, clipped to the ground-truth box. [N, 4]
                        center_boxes = torch.cat((
                            torch.max(centers - radius, tgt_box[:, :2]),
                            torch.min(centers + radius, tgt_box[:, 2:]),
                        ), dim=-1)
                        # [N, Mi, 4]
                        center_deltas = self.get_deltas(anchors_i, center_boxes.unsqueeze(1))
                        # [N, Mi]
                        is_in_boxes.append(center_deltas.min(dim=-1).values > 0)
                    # [N, M], M = M1 + M2 + ... + MF
                    is_in_boxes = torch.cat(is_in_boxes, dim=1)
                else:
                    # no center sampling: use all locations within a ground-truth box
                    # [N, M], M = M1 + M2 + ... + MF
                    is_in_boxes = deltas.min(dim=-1).values > 0
                # [N, M], M = M1 + M2 + ... + MF
                max_deltas = deltas.max(dim=-1).values
                # limit the regression range for each location
                is_cared_in_the_level = \
                    (max_deltas >= object_sizes_of_interest[None, :, 0]) & \
                    (max_deltas <= object_sizes_of_interest[None, :, 1])

                # [N,]
                tgt_box_area = (tgt_box[:, 2] - tgt_box[:, 0]) * (tgt_box[:, 3] - tgt_box[:, 1])
                # [N,] -> [N, 1] -> [N, M]
                gt_positions_area = tgt_box_area.unsqueeze(1).repeat(
                    1, anchors_over_all_feature_maps.size(0))
                gt_positions_area[~is_in_boxes] = math.inf
                gt_positions_area[~is_cared_in_the_level] = math.inf

                # if a position still matches more than one object,
                # choose the one with the smallest area.
                # [M,], each element is the index of the matched ground-truth
                positions_min_area, gt_matched_idxs = gt_positions_area.min(dim=0)
                # ground-truth box regression deltas
                # [M, 4]
                gt_anchors_reg_deltas_i = self.get_deltas(
                    anchors_over_all_feature_maps, tgt_box[gt_matched_idxs])
                # [M,]
                tgt_cls_i = tgt_cls[gt_matched_idxs]
                # anchors with area inf are treated as background
                tgt_cls_i[positions_min_area == math.inf] = self.num_classes

                # ground-truth centerness (FCOS):
                # sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b)))
                left_right = gt_anchors_reg_deltas_i[:, [0, 2]]
                top_bottom = gt_anchors_reg_deltas_i[:, [1, 3]]
                # [M,]
                gt_centerness_i = torch.sqrt(
                    (left_right.min(dim=-1).values / left_right.max(dim=-1).values).clamp_(min=0)
                    * (top_bottom.min(dim=-1).values / top_bottom.max(dim=-1).values).clamp_(min=0)
                )

                gt_classes.append(tgt_cls_i)
                gt_anchors_deltas.append(gt_anchors_reg_deltas_i)
                gt_centerness.append(gt_centerness_i)

                # free per-image temporaries; the center-sampling tensors only
                # exist when center sampling is enabled
                del deltas, max_deltas
                if self.center_sampling_radius > 0:
                    del centers, center_boxes, center_deltas
            else:
                # no ground-truth in this image: every location is background
                tgt_cls_i = torch.zeros(
                    anchors_over_all_feature_maps.shape[0], device=device) + self.num_classes
                gt_anchors_reg_deltas_i = torch.zeros(
                    [anchors_over_all_feature_maps.shape[0], 4], device=device)
                gt_centerness_i = torch.zeros(anchors_over_all_feature_maps.shape[0], device=device)

                gt_classes.append(tgt_cls_i.long())
                gt_anchors_deltas.append(gt_anchors_reg_deltas_i.float())
                gt_centerness.append(gt_centerness_i.float())

        # [B, M], [B, M, 4], [B, M]
        return torch.stack(gt_classes), torch.stack(gt_anchors_deltas), torch.stack(gt_centerness)
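

if __name__ == "__main__":
    # Smoke-test sketch: the strides, size ranges and anchor counts below are
    # assumptions chosen for illustration, not values from the original repo
    # (run inside the repo so the `utils.box_ops` import at the top resolves).
    fpn_strides = [8, 16]
    anchors = [torch.rand(100, 2) * 64,  # [M1, 2] anchor points, stride 8
               torch.rand(25, 2) * 64]   # [M2, 2] anchor points, stride 16
    matcher = FcosMatcher(num_classes=80,
                          center_sampling_radius=1.5,
                          object_sizes_of_interest=[[0, 64], [64, float("inf")]])
    targets = [{'boxes': torch.tensor([[10., 10., 40., 40.]]),
                'labels': torch.tensor([3])}]
    gt_cls, gt_deltas, gt_ctn = matcher(fpn_strides, anchors, targets)
    # expected: torch.Size([1, 125]) torch.Size([1, 125, 4]) torch.Size([1, 125])
    print(gt_cls.shape, gt_deltas.shape, gt_ctn.shape)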