import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.box_ops import box_iou, bbox_iou


# -------------------------- Basic Functions --------------------------
def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
    """Select the anchors whose centers fall inside a gt box.
    Args:
        xy_centers (Tensor): shape(num_total_anchors, 2)
        gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
    Return:
        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
    """
    n_anchors = xy_centers.size(0)
    bs, n_max_boxes, _ = gt_bboxes.size()
    _gt_bboxes = gt_bboxes.reshape([-1, 4])
    # [num_total_anchors, 2] -> [bs * n_max_boxes, num_total_anchors, 2]
    xy_centers = xy_centers.unsqueeze(0).repeat(bs * n_max_boxes, 1, 1)
    # left-top and right-bottom corners of each gt, broadcast over all anchors
    gt_bboxes_lt = _gt_bboxes[:, 0:2].unsqueeze(1).repeat(1, n_anchors, 1)
    gt_bboxes_rb = _gt_bboxes[:, 2:4].unsqueeze(1).repeat(1, n_anchors, 1)
    b_lt = xy_centers - gt_bboxes_lt
    b_rb = gt_bboxes_rb - xy_centers
    bbox_deltas = torch.cat([b_lt, b_rb], dim=-1)
    bbox_deltas = bbox_deltas.reshape([bs, n_max_boxes, n_anchors, -1])

    # an anchor center lies inside a gt iff all four signed distances are positive
    return (bbox_deltas.min(axis=-1)[0] > eps).to(gt_bboxes.dtype)
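
# A minimal usage sketch (not part of the original module): shows the expected tensor
# shapes for select_candidates_in_gts. The sizes below (2 images, 3 gt boxes, 8400
# anchors for a 640x640 input) are illustrative assumptions.
def _demo_select_candidates_in_gts():
    anc_points = torch.rand(8400, 2) * 640.             # (num_total_anchors, 2) anchor centers
    x1y1 = torch.rand(2, 3, 2) * 320.                   # random top-left corners
    wh = torch.rand(2, 3, 2) * 300. + 1.                # random positive box sizes
    gt_bboxes = torch.cat([x1y1, x1y1 + wh], dim=-1)    # (bs, n_max_boxes, 4) in xyxy
    mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
    print(mask_in_gts.shape)                            # torch.Size([2, 3, 8400])
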
def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
    """If an anchor box is assigned to multiple gts, keep only the gt with the highest IoU.
    Args:
        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
        overlaps (Tensor): shape(bs, n_max_boxes, num_total_anchors)
    Return:
        target_gt_idx (Tensor): shape(bs, num_total_anchors)
        fg_mask (Tensor): shape(bs, num_total_anchors)
        mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
    """
    # number of gts currently assigned to each anchor
    fg_mask = mask_pos.sum(axis=-2)
    if fg_mask.max() > 1:
        # anchors matched to more than one gt
        mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1])
        # for those anchors, keep only the gt with the highest IoU
        max_overlaps_idx = overlaps.argmax(axis=1)
        is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes)
        is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype)
        mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos)
        fg_mask = mask_pos.sum(axis=-2)
    # index of the assigned gt for every anchor
    target_gt_idx = mask_pos.argmax(axis=-2)

    return target_gt_idx, fg_mask, mask_pos
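
# A small sketch (assumption, not from the original code): resolves a toy case in which
# one anchor is claimed by two gts; the gt with the larger IoU wins.
def _demo_select_highest_overlaps():
    # bs=1, n_max_boxes=2, num_total_anchors=3; anchor 1 is claimed by both gts
    mask_pos = torch.tensor([[[1., 1., 0.],
                              [0., 1., 1.]]])
    overlaps = torch.tensor([[[0.7, 0.2, 0.0],
                              [0.0, 0.6, 0.5]]])
    target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, n_max_boxes=2)
    print(target_gt_idx)   # tensor([[0, 1, 1]]) -> anchor 1 goes to gt 1 (IoU 0.6 > 0.2)
    print(fg_mask)         # tensor([[1., 1., 1.]])
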
def iou_calculator(box1, box2, eps=1e-9):
    """Batched IoU between two sets of xyxy boxes.
    Args:
        box1 (Tensor): shape(bs, n_max_boxes, 4)
        box2 (Tensor): shape(bs, num_total_anchors, 4)
    Return:
        (Tensor): shape(bs, n_max_boxes, num_total_anchors)
    """
    box1 = box1.unsqueeze(2)    # [N, M1, 4] -> [N, M1, 1, 4]
    box2 = box2.unsqueeze(1)    # [N, M2, 4] -> [N, 1, M2, 4]
    px1y1, px2y2 = box1[:, :, :, 0:2], box1[:, :, :, 2:4]
    gx1y1, gx2y2 = box2[:, :, :, 0:2], box2[:, :, :, 2:4]
    # intersection corners and areas
    x1y1 = torch.maximum(px1y1, gx1y1)
    x2y2 = torch.minimum(px2y2, gx2y2)
    overlap = (x2y2 - x1y1).clip(0).prod(-1)
    area1 = (px2y2 - px1y1).clip(0).prod(-1)
    area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
    union = area1 + area2 - overlap + eps

    return overlap / union
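
# Illustrative sketch (assumption): IoU of two identical boxes is ~1 and of two disjoint
# boxes is 0. Shapes follow the docstring above: (bs, M1, 4) against (bs, M2, 4).
def _demo_iou_calculator():
    gts = torch.tensor([[[0., 0., 10., 10.]]])           # (1, 1, 4)
    preds = torch.tensor([[[0., 0., 10., 10.],
                           [20., 20., 30., 30.]]])       # (1, 2, 4)
    ious = iou_calculator(gts, preds)                    # (1, 1, 2)
    print(ious)   # IoU ~1 for the identical box, 0 for the disjoint one
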
# -------------------------- Task Aligned Assigner --------------------------
class TaskAlignedAssigner(nn.Module):
    def __init__(self, topk=10, alpha=0.5, beta=6.0, eps=1e-9, num_classes=80):
        super(TaskAlignedAssigner, self).__init__()
        self.topk = topk
        self.num_classes = num_classes
        self.bg_idx = num_classes
        self.alpha = alpha
        self.beta = beta
        self.eps = eps

    @torch.no_grad()
    def forward(self,
                pd_scores,
                pd_bboxes,
                anc_points,
                gt_labels,
                gt_bboxes):
        """This code is referenced from
        https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py
        Args:
            pd_scores (Tensor): shape(bs, num_total_anchors, num_classes)
            pd_bboxes (Tensor): shape(bs, num_total_anchors, 4)
            anc_points (Tensor): shape(num_total_anchors, 2)
            gt_labels (Tensor): shape(bs, n_max_boxes, 1)
            gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
        Returns:
            target_labels (Tensor): shape(bs, num_total_anchors)
            target_bboxes (Tensor): shape(bs, num_total_anchors, 4)
            target_scores (Tensor): shape(bs, num_total_anchors, num_classes)
            fg_mask (Tensor): shape(bs, num_total_anchors)
            target_gt_idx (Tensor): shape(bs, num_total_anchors)
        """
        self.bs = pd_scores.size(0)
        self.n_max_boxes = gt_bboxes.size(1)

        mask_pos, align_metric, overlaps = self.get_pos_mask(
            pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points)
        target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(
            mask_pos, overlaps, self.n_max_boxes)

        # assigned targets
        target_labels, target_bboxes, target_scores = self.get_targets(
            gt_labels, gt_bboxes, target_gt_idx, fg_mask)

        # normalize the soft classification targets by the alignment metric
        align_metric *= mask_pos
        pos_align_metrics = align_metric.amax(axis=-1, keepdim=True)        # b, max_num_obj
        pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True)    # b, max_num_obj
        norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1)
        target_scores = target_scores * norm_align_metric

        return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx

    def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points):
        # get the anchor alignment metric, (b, max_num_obj, h*w)
        align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes)
        # get the in-gts mask, (b, max_num_obj, h*w)
        mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
        # get the topk-metric mask, (b, max_num_obj, h*w)
        mask_topk = self.select_topk_candidates(align_metric * mask_in_gts)
        # merge all masks into a final mask, (b, max_num_obj, h*w)
        mask_pos = mask_topk * mask_in_gts

        return mask_pos, align_metric, overlaps

    def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes):
        ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long)         # 2, b, max_num_obj
        ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes)  # b, max_num_obj
        ind[1] = gt_labels.long().squeeze(-1)                                       # b, max_num_obj
        # get the predicted score of each anchor for each gt class
        bbox_scores = pd_scores[ind[0], :, ind[1]]                                  # b, max_num_obj, h*w
        # pairwise IoU between gt boxes and predicted boxes
        overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False).squeeze(3).clamp(0)
        # task-aligned metric: score^alpha * iou^beta
        align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)

        return align_metric, overlaps

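    # Note (illustrative, not from the original source): with the default alpha=0.5 and
    # beta=6.0, the metric t = s**alpha * u**beta weights localization quality heavily.
    # For example, s=0.9, u=0.5 gives t = 0.9**0.5 * 0.5**6 ~= 0.015, while s=0.5, u=0.9
    # gives t = 0.5**0.5 * 0.9**6 ~= 0.376, so a well-localized anchor ranks far higher
    # than a confidently classified but poorly localized one.
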
    def select_topk_candidates(self, metrics, largest=True):
        """
        Args:
            metrics: (b, max_num_obj, h*w)
            largest: if True, select the candidates with the largest metrics
        """
        num_anchors = metrics.shape[-1]  # h*w
        # (b, max_num_obj, topk)
        topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest)
        # keep a gt's topk only if its best metric is above eps (i.e. the gt is valid)
        topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).tile([1, 1, self.topk])
        # (b, max_num_obj, topk)
        topk_idxs[~topk_mask] = 0
        # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w)
        is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2)
        # filter out anchors counted more than once (candidates of invalid, padded gts)
        is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk)

        return is_in_topk.to(metrics.dtype)

    def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
        """
        Args:
            gt_labels: (b, max_num_obj, 1)
            gt_bboxes: (b, max_num_obj, 4)
            target_gt_idx: (b, h*w)
            fg_mask: (b, h*w)
        """
        # assigned target labels
        batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]  # (b, 1)
        target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes        # (b, h*w)
        target_labels = gt_labels.long().flatten()[target_gt_idx]           # (b, h*w)

        # assigned target boxes, (b*max_num_obj, 4) -> (b, h*w, 4)
        target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx]

        # assigned target scores
        target_labels.clamp_(0)
        target_scores = F.one_hot(target_labels, self.num_classes)              # (b, h*w, 80)
        fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes)     # (b, h*w, 80)
        target_scores = torch.where(fg_scores_mask > 0, target_scores, 0)

        return target_labels, target_bboxes, target_scores
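
# A minimal usage sketch (assumption, not part of the original training code): runs the
# assigner on random predictions. The sizes (2 images, 8400 anchors, 80 classes, 3 padded
# gts per image) are illustrative; real anchor points and boxes come from the detector.
def _demo_task_aligned_assigner():
    bs, num_anchors, num_classes, n_max_boxes = 2, 8400, 80, 3
    assigner = TaskAlignedAssigner(topk=10, num_classes=num_classes)
    pd_scores = torch.rand(bs, num_anchors, num_classes)
    p1 = torch.rand(bs, num_anchors, 2) * 320.
    pd_bboxes = torch.cat([p1, p1 + torch.rand(bs, num_anchors, 2) * 300. + 1.], dim=-1)
    anc_points = torch.rand(num_anchors, 2) * 640.
    gt_labels = torch.randint(0, num_classes, (bs, n_max_boxes, 1))
    g1 = torch.rand(bs, n_max_boxes, 2) * 320.
    gt_bboxes = torch.cat([g1, g1 + torch.rand(bs, n_max_boxes, 2) * 300. + 1.], dim=-1)
    target_labels, target_bboxes, target_scores, fg_mask, target_gt_idx = assigner(
        pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes)
    print(target_scores.shape, fg_mask.sum())   # torch.Size([2, 8400, 80]) and the number of positives
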
# -------------------------- Aligned SimOTA Assigner --------------------------
class AlignedSimOTA(object):
    """
    This code is referenced from https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/models/yolo_head.py
    """
    def __init__(self, num_classes, center_sampling_radius, topk_candidate):
        self.num_classes = num_classes
        self.center_sampling_radius = center_sampling_radius
        self.topk_candidate = topk_candidate

    @torch.no_grad()
    def __call__(self,
                 fpn_strides,
                 anchors,
                 pred_cls,
                 pred_box,
                 tgt_labels,
                 tgt_bboxes):
        # stride of every anchor, [M,]
        strides_tensor = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
                                    for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
        # List[F, M, 2] -> [M, 2]
        anchors = torch.cat(anchors, dim=0)
        num_anchor = anchors.shape[0]
        num_gt = len(tgt_labels)

        # ----------------------- Find inside points -----------------------
        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
            tgt_bboxes, anchors, strides_tensor, num_anchor, num_gt)
        cls_preds = pred_cls[fg_mask].float()   # [Mp, C]
        box_preds = pred_box[fg_mask].float()   # [Mp, 4]

        # ----------------------- Reg cost -----------------------
        pair_wise_ious, _ = box_iou(tgt_bboxes, box_preds)  # [N, Mp]
        reg_cost = -torch.log(pair_wise_ious + 1e-8)        # [N, Mp]

        # ----------------------- Cls cost -----------------------
        with torch.cuda.amp.autocast(enabled=False):
            # [Mp, C] -> [N, Mp, C]
            score_preds = cls_preds.sigmoid_().unsqueeze(0).repeat(num_gt, 1, 1)
            # prepare the one-hot cls target
            cls_targets = F.one_hot(tgt_labels.long(), self.num_classes).float()
            cls_targets = cls_targets.unsqueeze(1).repeat(1, score_preds.size(1), 1)
            # [N, Mp]
            cls_cost = F.binary_cross_entropy(score_preds, cls_targets, reduction="none").sum(-1)
        del score_preds

        # ----------------------- Dynamic K-Matching -----------------------
        cost_matrix = (
            cls_cost
            + 3.0 * reg_cost
            + 100000.0 * (~is_in_boxes_and_center)
        )  # [N, Mp]

        (
            assigned_labels,   # [num_fg,]
            assigned_ious,     # [num_fg,]
            assigned_indexs,   # [num_fg,]
        ) = self.dynamic_k_matching(
            cost_matrix,
            pair_wise_ious,
            tgt_labels,
            num_gt,
            fg_mask
        )
        del cls_cost, cost_matrix, pair_wise_ious, reg_cost

        return fg_mask, assigned_labels, assigned_ious, assigned_indexs

    def get_in_boxes_info(
            self,
            gt_bboxes,     # [N, 4]
            anchors,       # [M, 2]
            strides,       # [M,]
            num_anchors,   # M
            num_gt,        # N
            ):
        # anchor centers
        x_centers = anchors[:, 0]
        y_centers = anchors[:, 1]

        # [M,] -> [1, M] -> [N, M]
        x_centers = x_centers.unsqueeze(0).repeat(num_gt, 1)
        y_centers = y_centers.unsqueeze(0).repeat(num_gt, 1)

        # [N,] -> [N, 1] -> [N, M]
        gt_bboxes_l = gt_bboxes[:, 0].unsqueeze(1).repeat(1, num_anchors)  # x1
        gt_bboxes_t = gt_bboxes[:, 1].unsqueeze(1).repeat(1, num_anchors)  # y1
        gt_bboxes_r = gt_bboxes[:, 2].unsqueeze(1).repeat(1, num_anchors)  # x2
        gt_bboxes_b = gt_bboxes[:, 3].unsqueeze(1).repeat(1, num_anchors)  # y2

        b_l = x_centers - gt_bboxes_l
        b_r = gt_bboxes_r - x_centers
        b_t = y_centers - gt_bboxes_t
        b_b = gt_bboxes_b - y_centers
        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)

        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0

        # in fixed center
        center_radius = self.center_sampling_radius

        # [N, 2]
        gt_centers = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) * 0.5

        # [1, M]
        center_radius_ = center_radius * strides.unsqueeze(0)

        gt_bboxes_l = gt_centers[:, 0].unsqueeze(1).repeat(1, num_anchors) - center_radius_  # x1
        gt_bboxes_t = gt_centers[:, 1].unsqueeze(1).repeat(1, num_anchors) - center_radius_  # y1
        gt_bboxes_r = gt_centers[:, 0].unsqueeze(1).repeat(1, num_anchors) + center_radius_  # x2
        gt_bboxes_b = gt_centers[:, 1].unsqueeze(1).repeat(1, num_anchors) + center_radius_  # y2

        c_l = x_centers - gt_bboxes_l
        c_r = gt_bboxes_r - x_centers
        c_t = y_centers - gt_bboxes_t
        c_b = gt_bboxes_b - y_centers
        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)

        is_in_centers = center_deltas.min(dim=-1).values > 0.0
        is_in_centers_all = is_in_centers.sum(dim=0) > 0

        # in boxes or in centers (candidates), and in both (used in the cost)
        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
        is_in_boxes_and_center = (
            is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]
        )
        return is_in_boxes_anchor, is_in_boxes_and_center
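
    # Note (illustrative, not from the original source): with center_sampling_radius=2.5
    # and a stride of 8, the fixed sampling region spans 2.5 * 8 = 20 px on each side of
    # the gt center (a 40x40 px square); on the stride-32 level the square is 160x160 px.
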
    def dynamic_k_matching(
            self,
            cost,
            pair_wise_ious,
            gt_classes,
            num_gt,
            fg_mask
            ):
        # ---------------------------- Dynamic K ----------------------------
        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)

        # each gt's dynamic k = sum of its top-k candidate IoUs (at least 1)
        ious_in_boxes_matrix = pair_wise_ious
        n_candidate_k = min(self.topk_candidate, ious_in_boxes_matrix.size(1))
        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
        dynamic_ks = dynamic_ks.tolist()

        # for each gt, pick the dynamic_k anchors with the lowest cost
        for gt_idx in range(num_gt):
            _, pos_idx = torch.topk(
                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False
            )
            matching_matrix[gt_idx][pos_idx] = 1
        del topk_ious, dynamic_ks, pos_idx

        # if an anchor is matched to several gts, keep only the gt with the lowest cost
        anchor_matching_gt = matching_matrix.sum(0)
        if (anchor_matching_gt > 1).sum() > 0:
            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
            matching_matrix[:, anchor_matching_gt > 1] *= 0
            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1

        fg_mask_inboxes = matching_matrix.sum(0) > 0
        fg_mask[fg_mask.clone()] = fg_mask_inboxes

        assigned_indexs = matching_matrix[:, fg_mask_inboxes].argmax(0)
        assigned_labels = gt_classes[assigned_indexs]
        assigned_ious = (matching_matrix * pair_wise_ious).sum(0)[fg_mask_inboxes]

        return assigned_labels, assigned_ious, assigned_indexs
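
# A minimal usage sketch (assumption, not part of the original training code): runs the
# SimOTA assigner on random single-image predictions. The three FPN levels, strides and
# grid sizes below are illustrative; `anchors` are per-level grid-cell centers in image
# coordinates, and pred_box holds xyxy boxes so that utils.box_ops.box_iou applies.
def _demo_aligned_simota():
    fpn_strides = [8, 16, 32]
    grid_sizes = [(80, 80), (40, 40), (20, 20)]
    anchors = []
    for s, (h, w) in zip(fpn_strides, grid_sizes):
        ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
        anchors.append((torch.stack([xs, ys], dim=-1).reshape(-1, 2).float() + 0.5) * s)
    num_anchors = sum(a.shape[0] for a in anchors)      # 8400 for a 640x640 input
    num_classes = 80
    pred_cls = torch.randn(num_anchors, num_classes)    # raw classification logits
    p1 = torch.rand(num_anchors, 2) * 320.
    pred_box = torch.cat([p1, p1 + torch.rand(num_anchors, 2) * 300. + 1.], dim=-1)
    tgt_labels = torch.tensor([3, 17])
    tgt_bboxes = torch.tensor([[ 50.,  60., 200., 220.],
                               [300., 100., 500., 400.]])
    assigner = AlignedSimOTA(num_classes=num_classes, center_sampling_radius=2.5, topk_candidate=10)
    fg_mask, assigned_labels, assigned_ious, assigned_indexs = assigner(
        fpn_strides, anchors, pred_cls, pred_box, tgt_labels, tgt_bboxes)
    print(fg_mask.sum(), assigned_labels)   # number of positive anchors and their class ids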