import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.box_ops import get_ious, bbox2dist
from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized

from .matcher import AlignedSimOTA


class SetCriterion(object):
    def __init__(self, cfg):
        self.cfg = cfg
        self.reg_max = cfg.reg_max
        self.num_classes = cfg.num_classes
        # --------------- Loss config ---------------
        self.loss_cls_weight = cfg.loss_cls
        self.loss_box_weight = cfg.loss_box
        self.loss_dfl_weight = cfg.loss_dfl
        # --------------- Matcher config ---------------
        self.matcher = AlignedSimOTA(soft_center_radius=cfg.ota_soft_center_radius,
                                     topk_candidates=cfg.ota_topk_candidates,
                                     num_classes=cfg.num_classes,
                                     )
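
    # NOTE: AlignedSimOTA is expected to return, per image, a dict with the keys
    # 'assigned_labels', 'assigned_bboxes' and 'assign_metrics'; __call__ below
    # consumes exactly these keys.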

    def loss_classes(self, pred_cls, target, beta=2.0):
        # Quality Focal Loss
        """
        pred_cls: (torch.Tensor) [N, C]
        target:   (tuple(torch.Tensor, torch.Tensor)) label -> (N,), score -> (N,)
        """
        label, score = target
        pred_sigmoid = pred_cls.sigmoid()
        scale_factor = pred_sigmoid
        zerolabel = scale_factor.new_zeros(pred_cls.shape)
        # negative samples: the target quality score is 0 for every class
        ce_loss = F.binary_cross_entropy_with_logits(
            pred_cls, zerolabel, reduction='none') * scale_factor.pow(beta)

        # positive samples: the target is the assigned quality score of the matched GT
        bg_class_ind = pred_cls.shape[-1]
        pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
        if pos.shape[0] > 0:
            pos_label = label[pos].long()
            scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
            ce_loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
                pred_cls[pos, pos_label], score[pos],
                reduction='none') * scale_factor.abs().pow(beta)
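
        # Taken together, the two BCE branches implement the Quality Focal Loss,
        #   QFL(sigma) = -|y - sigma|^beta * [(1 - y) * log(1 - sigma) + y * log(sigma)],
        # with y = 0 for negative anchors and y = the assigned quality score for positives.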
        return ce_loss

    def loss_bboxes(self, pred_box, gt_box):
        ious = get_ious(pred_box, gt_box, box_mode="xyxy", iou_type='giou')
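        # (get_ious is assumed to return element-wise GIoU values for matched
        #  prediction/target pairs, so the loss below has shape [N_pos].)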
        loss_box = 1.0 - ious

        return loss_box

    def loss_dfl(self, pred_reg, gt_box, anchor, stride):
        # rescale coords by stride
        gt_box_s = gt_box / stride
        anchor_s = anchor / stride

        # compute deltas
        gt_ltrb_s = bbox2dist(anchor_s, gt_box_s, self.reg_max - 1)

        gt_left = gt_ltrb_s.to(torch.long)
        gt_right = gt_left + 1

        weight_left = gt_right.to(torch.float) - gt_ltrb_s
        weight_right = 1 - weight_left

        # loss left
        loss_left = F.cross_entropy(
            pred_reg.view(-1, self.reg_max),
            gt_left.view(-1),
            reduction='none').view(gt_left.shape) * weight_left

        # loss right
        loss_right = F.cross_entropy(
            pred_reg.view(-1, self.reg_max),
            gt_right.view(-1),
            reduction='none').view(gt_left.shape) * weight_right

        loss_dfl = (loss_left + loss_right).mean(-1)
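
        # Distribution Focal Loss: each target distance in gt_ltrb_s lies between two
        # integer bins (gt_left, gt_right); cross-entropy on both bins, weighted by the
        # distance to each, pushes the predicted distribution to peak at the true offset.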

        return loss_dfl

    def __call__(self, outputs, targets):
        """
        outputs['pred_cls']: List(Tensor) [B, M, C]
        outputs['pred_box']: List(Tensor) [B, M, 4]
        outputs['pred_reg']: List(Tensor) [B, M, 4*reg_max]
        outputs['strides']: List(Int) [8, 16, 32] output strides
        targets: (List) [dict{'boxes': [...],
                              'labels': [...],
                              'orig_size': ...}, ...]
        """
        bs = outputs['pred_cls'][0].shape[0]
        device = outputs['pred_cls'][0].device
        fpn_strides = outputs['strides']
        anchors = outputs['anchors']
        # preds: [B, M, C], where M is the total number of anchors over all FPN levels
        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
        box_preds = torch.cat(outputs['pred_box'], dim=1)
        reg_preds = torch.cat(outputs['pred_reg'], dim=1)

        # --------------- label assignment ---------------
        cls_targets = []
        box_targets = []
        assign_metrics = []
        for batch_idx in range(bs):
            tgt_labels = targets[batch_idx]["labels"].to(device)  # [N,]
            tgt_bboxes = targets[batch_idx]["boxes"].to(device)   # [N, 4]
            assigned_result = self.matcher(fpn_strides=fpn_strides,
                                           anchors=anchors,
                                           pred_cls=cls_preds[batch_idx].detach(),
                                           pred_box=box_preds[batch_idx].detach(),
                                           gt_labels=tgt_labels,
                                           gt_bboxes=tgt_bboxes
                                           )
            cls_targets.append(assigned_result['assigned_labels'])
            box_targets.append(assigned_result['assigned_bboxes'])
            assign_metrics.append(assigned_result['assign_metrics'])

        # List[B, M, C] -> Tensor[BM, C]
        cls_targets = torch.cat(cls_targets, dim=0)
        box_targets = torch.cat(box_targets, dim=0)
        assign_metrics = torch.cat(assign_metrics, dim=0)

        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((cls_targets >= 0) & (cls_targets < bg_class_ind)).nonzero().squeeze(1)
        num_fgs = assign_metrics.sum()

        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_fgs)
        num_fgs = (num_fgs / get_world_size()).clamp(1.0).item()
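
        # num_fgs sums the per-anchor assignment quality scores and is averaged across
        # distributed ranks, so every process normalizes its losses by the same value.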

        # ------------------ Classification loss ------------------
        cls_preds = cls_preds.view(-1, self.num_classes)
        loss_cls = self.loss_classes(cls_preds, (cls_targets, assign_metrics))
        loss_cls = loss_cls.sum() / num_fgs

        # ------------------ Regression loss ------------------
        box_preds_pos = box_preds.view(-1, 4)[pos_inds]
        box_targets_pos = box_targets[pos_inds]
        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos)
        loss_box = loss_box.sum() / num_fgs
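
        # Only foreground anchors (pos_inds) contribute to the box and DFL terms below;
        # the classification loss above is computed over all anchors.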

        # ------------------ Distribution focal loss ------------------
        ## process anchors
        anchors = torch.cat(outputs['anchors'], dim=0)
        anchors = anchors[None].repeat(bs, 1, 1).view(-1, 2)
        ## process stride tensors
        strides = torch.cat(outputs['stride_tensor'], dim=0)
        strides = strides.unsqueeze(0).repeat(bs, 1, 1).view(-1, 1)
        ## fg preds
        reg_preds_pos = reg_preds.view(-1, 4*self.reg_max)[pos_inds]
        anchors_pos = anchors[pos_inds]
        strides_pos = strides[pos_inds]
        ## compute dfl
        loss_dfl = self.loss_dfl(reg_preds_pos, box_targets_pos, anchors_pos, strides_pos)
        loss_dfl = loss_dfl.sum() / num_fgs

        # total loss
        losses = self.loss_cls_weight * loss_cls + \
                 self.loss_box_weight * loss_box + \
                 self.loss_dfl_weight * loss_dfl

        loss_dict = dict(
            loss_cls=loss_cls,
            loss_box=loss_box,
            loss_dfl=loss_dfl,
            losses=losses,
        )

        return loss_dict
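

# --------------------------------------------------------------------------------------
# Illustrative usage sketch (not from the original file). It only shows which cfg fields
# and output keys SetCriterion consumes; the numeric values and the `model` producing
# `outputs` are placeholder assumptions.
# --------------------------------------------------------------------------------------
# from types import SimpleNamespace
#
# cfg = SimpleNamespace(reg_max=16, num_classes=80,                 # DFL bins, class count
#                       loss_cls=1.0, loss_box=2.0, loss_dfl=0.5,   # loss weights
#                       ota_soft_center_radius=3.0, ota_topk_candidates=13)
# criterion = SetCriterion(cfg)
# outputs = model(images)   # dict with 'pred_cls', 'pred_box', 'pred_reg',
#                           # 'anchors', 'strides', 'stride_tensor'
# loss_dict = criterion(outputs, targets)
# loss_dict['losses'].backward()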