yjh0410 1 year ago
Commit
425b09af6a

+ 1 - 1
models/detectors/rtdetr/rtdetr.py

@@ -47,7 +47,7 @@ class RT_DETR(nn.Module):
         self.detect_decoder = build_transformer(cfg, self.fpn_dims, num_classes, return_intermediate=self.training)
 
     def post_process(self, box_pred, cls_pred):
-        # xyxy -> bwbh
+        # xywh -> xyxy
         box_preds_x1y1 = box_pred[..., :2] - 0.5 * box_pred[..., 2:]
         box_preds_x2y2 = box_pred[..., :2] + 0.5 * box_pred[..., 2:]
         box_pred = torch.cat([box_preds_x1y1, box_preds_x2y2], dim=-1)
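
For context, the corrected comment above describes the conversion from center format (cx, cy, w, h) to corner format (x1, y1, x2, y2). A minimal standalone sketch of that arithmetic, with illustrative values that are not taken from the repo:

    import torch

    # one box in (cx, cy, w, h) format: center (0.5, 0.5), width 0.2, height 0.4
    box_pred = torch.tensor([[0.5, 0.5, 0.2, 0.4]])

    # (cx, cy, w, h) -> (x1, y1, x2, y2), as in the hunk above
    box_x1y1 = box_pred[..., :2] - 0.5 * box_pred[..., 2:]
    box_x2y2 = box_pred[..., :2] + 0.5 * box_pred[..., 2:]
    box_xyxy = torch.cat([box_x1y1, box_x2y2], dim=-1)
    print(box_xyxy)  # tensor([[0.4000, 0.3000, 0.6000, 0.7000]])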

+ 5 - 0
models/detectors/rtpdetr/rtpdetr.py

@@ -123,6 +123,11 @@ class RT_PDETR(nn.Module):
         return pos_embed
 
     def post_process(self, box_pred, cls_pred):
+        # xywh -> xyxy
+        box_preds_x1y1 = box_pred[..., :2] - 0.5 * box_pred[..., 2:]
+        box_preds_x2y2 = box_pred[..., :2] + 0.5 * box_pred[..., 2:]
+        box_pred = torch.cat([box_preds_x1y1, box_preds_x2y2], dim=-1)
+
         cls_pred = cls_pred[0]
         box_pred = box_pred[0]
         if self.no_multi_labels:

+ 1 - 0
models/detectors/rtpdetr/rtpdetr_decoder.py

@@ -10,6 +10,7 @@ except:
     from  basic_modules.basic import LayerNorm2D
     from  basic_modules.transformer import GlobalDecoder
 
+
 def build_transformer(cfg, return_intermediate=False):
     if cfg['transformer'] == 'plain_detr_transformer':
         return PlainDETRTransformer(d_model             = cfg['hidden_dim'],

+ 0 - 129
models/detectors/vitdet/basic_modules/backbone.py

@@ -1,129 +0,0 @@
-import torch
-import torchvision
-from torch import nn
-from torchvision.models._utils import IntermediateLayerGetter
-
-try:
-    from .basic import FrozenBatchNorm2d
-except:
-    from basic  import FrozenBatchNorm2d
-   
-
-# IN1K MIM pretrained weights (from SparK: https://github.com/keyu-tian/SparK)
-pretrained_urls = {
-    # ResNet series
-    'resnet18':  None,
-    'resnet34':  None,
-    'resnet50':  "https://github.com/yjh0410/RT-ODLab/releases/download/backbone_weight/resnet50_in1k_spark_pretrained_timm_style.pth",
-    'resnet101': None,
-    # ShuffleNet series
-}
-
-
-# ----------------- Model functions -----------------
-## Build backbone network
-def build_backbone(cfg, pretrained=False):
-    print('==============================')
-    print('Backbone: {}'.format(cfg['backbone']))
-    # ResNet
-    if 'resnet' in cfg['backbone']:
-        model, feats = build_resnet(cfg, pretrained)
-    else:
-        raise NotImplementedError("Unknown backbone: {}.".format(cfg['backbone']))
-    
-    return model, feats
-
-
-# ----------------- ResNet Backbone -----------------
-class VisionTransformer(nn.Module):
-    """Vision Transformer."""
-    def __init__(self,
-                 name: str,
-                 norm_type: str,
-                 pretrained: bool = False,
-                 freeze_at: int = -1,
-                 freeze_stem_only: bool = False):
-        super().__init__()
-        # Pretrained
-        # Norm layer
-        if norm_type == 'BN':
-            norm_layer = nn.BatchNorm2d
-        elif norm_type == 'FrozeBN':
-            norm_layer = FrozenBatchNorm2d
-        # Backbone
-        backbone = getattr(torchvision.models, name)(norm_layer=norm_layer,)
-        return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
-        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
-        self.feat_dims = [128, 256, 512] if name in ('resnet18', 'resnet34') else [512, 1024, 2048]
-        
-        # Load pretrained
-        if pretrained:
-            self.load_pretrained(name)
-
-        # Freeze
-        if freeze_at >= 0:
-            for name, parameter in backbone.named_parameters():
-                if freeze_stem_only:
-                    if 'layer1' not in name and 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
-                        parameter.requires_grad_(False)
-                else:
-                    if 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
-                        parameter.requires_grad_(False)
-
-    def load_pretrained(self, name):
-        url = pretrained_urls[name]
-        if url is not None:
-            print('Loading pretrained weight from : {}'.format(url))
-            # checkpoint state dict
-            checkpoint_state_dict = torch.hub.load_state_dict_from_url(
-                url=url, map_location="cpu", check_hash=True)
-            # model state dict
-            model_state_dict = self.body.state_dict()
-            # check
-            for k in list(checkpoint_state_dict.keys()):
-                if k in model_state_dict:
-                    shape_model = tuple(model_state_dict[k].shape)
-                    shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                    if shape_model != shape_checkpoint:
-                        checkpoint_state_dict.pop(k)
-                else:
-                    checkpoint_state_dict.pop(k)
-                    print('Unused key: ', k)
-            # load the weight
-            self.body.load_state_dict(checkpoint_state_dict)
-        else:
-            print('No backbone pretrained for {}.'.format(name))
-
-    def forward(self, x):
-        xs = self.body(x)
-        fmp_list = []
-        for name, fmp in xs.items():
-            fmp_list.append(fmp)
-
-        return fmp_list
-
-def build_resnet(cfg, pretrained=False):
-    # ResNet series
-    backbone = None
-
-    return backbone
-
-
-if __name__ == '__main__':
-    cfg = {
-        'backbone': 'resnet50',
-        'backbone_norm': 'FrozeBN',
-        'pretrained': True,
-        'freeze_at': 0,
-        'freeze_stem_only': False,
-    }
-    model, feat_dim = build_backbone(cfg, cfg['pretrained'])
-    model.eval()
-    print(feat_dim)
-
-    x = torch.ones(2, 3, 320, 320)
-    output = model(x)
-    for y in output:
-        print(y.size())
-    print(output[-1])
-

+ 0 - 238
models/detectors/vitdet/basic_modules/basic.py

@@ -1,238 +0,0 @@
-import math
-import warnings
-import numpy as np
-import torch
-import torch.nn as nn
-
-
-def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
-    """Copy from timm"""
-    with torch.no_grad():
-        """Copy from timm"""
-        def norm_cdf(x):
-            return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
-        if (mean < a - 2 * std) or (mean > b + 2 * std):
-            warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
-                        "The distribution of values may be incorrect.",
-                        stacklevel=2)
-
-        l = norm_cdf((a - mean) / std)
-        u = norm_cdf((b - mean) / std)
-
-        tensor.uniform_(2 * l - 1, 2 * u - 1)
-        tensor.erfinv_()
-
-        tensor.mul_(std * math.sqrt(2.))
-        tensor.add_(mean)
-
-        tensor.clamp_(min=a, max=b)
-
-        return tensor
-
-
-# ---------------------------- NMS ----------------------------
-## basic NMS
-def nms(bboxes, scores, nms_thresh):
-    """"Pure Python NMS."""
-    x1 = bboxes[:, 0]  #xmin
-    y1 = bboxes[:, 1]  #ymin
-    x2 = bboxes[:, 2]  #xmax
-    y2 = bboxes[:, 3]  #ymax
-
-    areas = (x2 - x1) * (y2 - y1)
-    order = scores.argsort()[::-1]
-
-    keep = []
-    while order.size > 0:
-        i = order[0]
-        keep.append(i)
-        # compute iou
-        xx1 = np.maximum(x1[i], x1[order[1:]])
-        yy1 = np.maximum(y1[i], y1[order[1:]])
-        xx2 = np.minimum(x2[i], x2[order[1:]])
-        yy2 = np.minimum(y2[i], y2[order[1:]])
-
-        w = np.maximum(1e-10, xx2 - xx1)
-        h = np.maximum(1e-10, yy2 - yy1)
-        inter = w * h
-
-        iou = inter / (areas[i] + areas[order[1:]] - inter + 1e-14)
-        # keep only the boxes whose IoU with the current box is below the threshold
-        inds = np.where(iou <= nms_thresh)[0]
-        order = order[inds + 1]
-
-    return keep
-
-## class-agnostic NMS 
-def multiclass_nms_class_agnostic(scores, labels, bboxes, nms_thresh):
-    # nms
-    keep = nms(bboxes, scores, nms_thresh)
-    scores = scores[keep]
-    labels = labels[keep]
-    bboxes = bboxes[keep]
-
-    return scores, labels, bboxes
-
-## class-aware NMS 
-def multiclass_nms_class_aware(scores, labels, bboxes, nms_thresh, num_classes):
-    # nms
-    keep = np.zeros(len(bboxes), dtype=np.int32)
-    for i in range(num_classes):
-        inds = np.where(labels == i)[0]
-        if len(inds) == 0:
-            continue
-        c_bboxes = bboxes[inds]
-        c_scores = scores[inds]
-        c_keep = nms(c_bboxes, c_scores, nms_thresh)
-        keep[inds[c_keep]] = 1
-    keep = np.where(keep > 0)
-    scores = scores[keep]
-    labels = labels[keep]
-    bboxes = bboxes[keep]
-
-    return scores, labels, bboxes
-
-## multi-class NMS 
-def multiclass_nms(scores, labels, bboxes, nms_thresh, num_classes, class_agnostic=False):
-    if class_agnostic:
-        return multiclass_nms_class_agnostic(scores, labels, bboxes, nms_thresh)
-    else:
-        return multiclass_nms_class_aware(scores, labels, bboxes, nms_thresh, num_classes)
-
-
-# ----------------- Customed NormLayer Ops -----------------
-class LayerNorm2D(nn.Module):
-    def __init__(self, normalized_shape, norm_layer=nn.LayerNorm):
-        super().__init__()
-        self.ln = norm_layer(normalized_shape) if norm_layer is not None else nn.Identity()
-
-    def forward(self, x):
-        """
-        x: N C H W
-        """
-        x = x.permute(0, 2, 3, 1)
-        x = self.ln(x)
-        x = x.permute(0, 3, 1, 2)
-        return x
-
-
-# ----------------- Basic CNN Ops -----------------
-def get_conv2d(c1, c2, k, p, s, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type == 'gelu':
-        return nn.GELU()
-    elif act_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-        
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-    elif norm_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-class BasicConv(nn.Module):
-    def __init__(self, 
-                 in_dim,                   # in channels
-                 out_dim,                  # out channels 
-                 kernel_size=1,            # kernel size 
-                 padding=0,                # padding
-                 stride=1,                 # stride
-                 act_type  :str = 'lrelu', # activation
-                 norm_type :str = 'BN',    # normalization
-                ):
-        super(BasicConv, self).__init__()
-        add_bias = False if norm_type else True
-        self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, g=1, bias=add_bias)
-        self.norm = get_norm(norm_type, out_dim)
-        self.act  = get_activation(act_type)
-
-    def forward(self, x):
-        return self.act(self.norm(self.conv(x)))
-
-class UpSampleWrapper(nn.Module):
-    """Upsample last feat map to specific stride."""
-    def __init__(self, in_dim, upsample_factor):
-        super(UpSampleWrapper, self).__init__()
-        # ---------- Basic parameters ----------
-        self.upsample_factor = upsample_factor
-
-        # ---------- Network parameters ----------
-        if upsample_factor == 1:
-            self.upsample = nn.Identity()
-        else:
-            scale = int(math.log2(upsample_factor))
-            dim = in_dim
-            layers = []
-            for _ in range(scale-1):
-                layers += [
-                    nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
-                    LayerNorm2D(dim),
-                    nn.GELU()
-                ]
-            layers += [nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2)]
-            self.upsample = nn.Sequential(*layers)
-            self.out_dim = dim
-
-    def forward(self, x):
-        x = self.upsample(x)
-
-        return x
-
-
-# ----------------- MLP modules -----------------
-class MLP(nn.Module):
-    def __init__(self, in_dim, hidden_dim, out_dim, num_layers):
-        super().__init__()
-        self.num_layers = num_layers
-        h = [hidden_dim] * (num_layers - 1)
-        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([in_dim] + h, h + [out_dim]))
-
-    def forward(self, x):
-        for i, layer in enumerate(self.layers):
-            x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
-        return x
-
-class FFN(nn.Module):
-    def __init__(self, d_model=256, mlp_ratio=4.0, dropout=0., act_type='relu', pre_norm=False):
-        super().__init__()
-        # ----------- Basic parameters -----------
-        self.pre_norm = pre_norm
-        self.fpn_dim = round(d_model * mlp_ratio)
-        # ----------- Network parameters -----------
-        self.linear1 = nn.Linear(d_model, self.fpn_dim)
-        self.activation = get_activation(act_type)
-        self.dropout2 = nn.Dropout(dropout)
-        self.linear2 = nn.Linear(self.fpn_dim, d_model)
-        self.dropout3 = nn.Dropout(dropout)
-        self.norm = nn.LayerNorm(d_model)
-
-    def forward(self, src):
-        if self.pre_norm:
-            src = self.norm(src)
-            src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
-            src = src + self.dropout3(src2)
-        else:
-            src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
-            src = src + self.dropout3(src2)
-            src = self.norm(src)
-        
-        return src
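
A small usage sketch of the multiclass_nms helper removed above; the boxes and scores are invented, and the helper itself is assumed to be in scope:

    import numpy as np

    # two heavily overlapping class-0 boxes plus one class-1 box
    bboxes = np.array([[10., 10., 50., 50.],
                       [12., 12., 52., 52.],
                       [100., 100., 140., 140.]])
    scores = np.array([0.9, 0.8, 0.7])
    labels = np.array([0, 0, 1])

    # class-aware NMS at IoU 0.5 keeps the first and third boxes
    scores, labels, bboxes = multiclass_nms(scores, labels, bboxes,
                                            nms_thresh=0.5, num_classes=2,
                                            class_agnostic=False)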

+ 0 - 0
models/detectors/vitdet/build.py


+ 0 - 163
models/detectors/vitdet/loss.py

@@ -1,163 +0,0 @@
-import torch
-import torch.nn.functional as F
-
-try:
-    from .loss_utils import get_ious, get_world_size, is_dist_avail_and_initialized
-    from .matcher import AlignedSimOtaMatcher
-except:
-    from  loss_utils import get_ious, get_world_size, is_dist_avail_and_initialized
-    from  matcher import AlignedSimOtaMatcher
-
-
-class Criterion(object):
-    def __init__(self, cfg, num_classes=80):
-        # ------------ Basic parameters ------------
-        self.cfg = cfg
-        self.num_classes = num_classes
-        # --------------- Matcher config ---------------
-        self.matcher_hpy = cfg['matcher_hpy']
-        self.matcher = AlignedSimOtaMatcher(soft_center_radius = self.matcher_hpy['soft_center_radius'],
-                                            topk_candidates    = self.matcher_hpy['topk_candidates'],
-                                            num_classes        = num_classes,
-                                            )
-        # ------------- Loss weight -------------
-        self.weight_dict = {'loss_cls':  cfg['loss_coeff']['class'],
-                            'loss_box':  cfg['loss_coeff']['bbox'],
-                            'loss_giou': cfg['loss_coeff']['giou']}
-
-    def loss_classes(self, pred_cls, target, num_gts, beta=2.0):
-        # Quality FocalLoss
-        """
-            pred_cls: (torch.Tensor): [N, C].
-            target:   (tuple([torch.Tensor], [torch.Tensor])): label -> (N,), score -> (N,)
-        """
-        label, score = target
-        pred_sigmoid = pred_cls.sigmoid()
-        scale_factor = pred_sigmoid
-        zerolabel = scale_factor.new_zeros(pred_cls.shape)
-
-        ce_loss = F.binary_cross_entropy_with_logits(
-            pred_cls, zerolabel, reduction='none') * scale_factor.pow(beta)
-        
-        bg_class_ind = pred_cls.shape[-1]
-        pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
-        pos_label = label[pos].long()
-
-        scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
-
-        ce_loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
-            pred_cls[pos, pos_label], score[pos],
-            reduction='none') * scale_factor.abs().pow(beta)
-        
-        losses = {}
-        losses['loss_cls'] = ce_loss.sum() / num_gts
-
-        return losses
-    
-    def loss_bboxes(self, pred_reg, pred_box, gt_box, anchors, stride_tensors, num_gts):
-        # --------------- Compute L1 loss ---------------
-        ## xyxy -> cxcy&bwbh
-        gt_cxcy = (gt_box[..., :2] + gt_box[..., 2:]) * 0.5
-        gt_bwbh = gt_box[..., 2:] - gt_box[..., :2]
-        ## Encode gt box
-        gt_cxcy_encode = (gt_cxcy - anchors) / stride_tensors
-        gt_bwbh_encode = torch.log(gt_bwbh / stride_tensors)
-        gt_box_encode = torch.cat([gt_cxcy_encode, gt_bwbh_encode], dim=-1)
-        # L1 loss
-        loss_box = F.l1_loss(pred_reg, gt_box_encode, reduction='none')
-
-        # --------------- Compute GIoU loss ---------------
-        gious = get_ious(pred_box, gt_box, box_mode="xyxy", iou_type='giou')
-        loss_giou = 1.0 - gious
-
-        losses = {}
-        losses['loss_box'] = loss_box.sum() / num_gts
-        losses['loss_giou'] = loss_giou.sum() / num_gts
-
-        return losses
-    
-    def __call__(self, outputs, targets):        
-        """
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['pred_reg']: List(Tensor) [B, M, 4]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs = outputs['pred_cls'][0].shape[0]
-        device = outputs['pred_cls'][0].device
-        anchors = outputs['anchors']
-        fpn_strides = outputs['strides']
-        stride_tensors = outputs['stride_tensors']
-        losses = dict()
-        # preds: [B, M, C]
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-        reg_preds = torch.cat(outputs['pred_reg'], dim=1)
-        
-        # --------------- label assignment ---------------
-        cls_targets = []
-        box_targets = []
-        assign_metrics = []
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)  # [N,]
-            tgt_bboxes = targets[batch_idx]["boxes"].to(device)   # [N, 4]
-            assigned_result = self.matcher(fpn_strides=fpn_strides,
-                                           anchors=anchors,
-                                           pred_cls=cls_preds[batch_idx].detach(),
-                                           pred_box=box_preds[batch_idx].detach(),
-                                           gt_labels=tgt_labels,
-                                           gt_bboxes=tgt_bboxes
-                                           )
-            cls_targets.append(assigned_result['assigned_labels'])
-            box_targets.append(assigned_result['assigned_bboxes'])
-            assign_metrics.append(assigned_result['assign_metrics'])
-
-        # List[B, M, C] -> Tensor[BM, C]
-        cls_targets = torch.cat(cls_targets, dim=0)
-        box_targets = torch.cat(box_targets, dim=0)
-        assign_metrics = torch.cat(assign_metrics, dim=0)
-
-        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
-        bg_class_ind = self.num_classes
-        pos_inds = ((cls_targets >= 0) & (cls_targets < bg_class_ind)).nonzero().squeeze(1)
-        num_fgs = assign_metrics.sum()
-
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0).item()
-
-        # ------------------ Classification loss ------------------
-        cls_preds = cls_preds.view(-1, self.num_classes)
-        loss_dict = self.loss_classes(cls_preds, (cls_targets, assign_metrics), num_fgs)
-        loss_dict = {k: loss_dict[k] * self.weight_dict[k] for k in loss_dict if k in self.weight_dict}
-        losses.update(loss_dict)
-
-        # ------------------ Regression loss ------------------
-        box_targets_pos = box_targets[pos_inds]
-        ## positive predictions
-        box_preds_pos = box_preds.view(-1, 4)[pos_inds]
-        reg_preds_pos = reg_preds.view(-1, 4)[pos_inds]
-
-        ## anchor tensors
-        anchors_tensors = torch.cat(anchors, dim=0)[None].repeat(bs, 1, 1)
-        anchors_tensors_pos = anchors_tensors.view(-1, 2)[pos_inds]
-
-        ## stride tensors
-        stride_tensors = torch.cat(stride_tensors, dim=0)[None].repeat(bs, 1, 1)
-        stride_tensors_pos = stride_tensors.view(-1, 1)[pos_inds]
-
-        ## aux loss
-        loss_dict = self.loss_bboxes(reg_preds_pos, box_preds_pos, box_targets_pos, anchors_tensors_pos, stride_tensors_pos, num_fgs)
-        loss_dict = {k: loss_dict[k] * self.weight_dict[k] for k in loss_dict if k in self.weight_dict}
-        losses.update(loss_dict)
-
-        return losses
-    
-
-def build_criterion(cfg, num_classes):
-    criterion = Criterion(cfg, num_classes)
-
-    return criterion
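
The loss_classes term above is a quality-focal-style objective: a positive entry is pushed toward its assigned quality score with a |score - sigmoid(logit)|^beta weight. A tiny hedged illustration of that positive-sample term (logits and quality value are made up):

    import torch
    import torch.nn.functional as F

    # one positive prediction over 3 classes (made-up logits) with quality target 0.7
    pred_cls = torch.tensor([[2.0, -1.0, 0.5]])   # [N=1, C=3]
    pos_label, quality = 0, torch.tensor(0.7)

    p = pred_cls.sigmoid()[0, pos_label]
    scale = (quality - p).abs().pow(2.0)          # |score - sigmoid(logit)|^beta, beta=2
    loss_pos = F.binary_cross_entropy_with_logits(pred_cls[0, pos_label], quality,
                                                  reduction='none') * scale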

+ 0 - 87
models/detectors/vitdet/loss_utils.py

@@ -1,87 +0,0 @@
-import torch
-import torch.distributed as dist
-from torchvision.ops.boxes import box_area
-
-
-# ------------------------- For box -------------------------
-def box_iou(boxes1, boxes2):
-    area1 = box_area(boxes1)
-    area2 = box_area(boxes2)
-
-    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
-    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
-
-    wh = (rb - lt).clamp(min=0)  # [N,M,2]
-    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
-
-    union = area1[:, None] + area2 - inter
-
-    iou = inter / union
-    return iou, union
-
-def get_ious(bboxes1,
-             bboxes2,
-             box_mode="xyxy",
-             iou_type="iou"):
-    """
-    Compute iou loss of type ['iou', 'giou', 'linear_iou']
-
-    Args:
-        inputs (tensor): pred values
-        targets (tensor): target values
-        weight (tensor): loss weight
-        box_mode (str): 'xyxy' or 'ltrb', 'ltrb' is currently supported.
-        loss_type (str): 'giou' or 'iou' or 'linear_iou'
-        reduction (str): reduction manner
-
-    Returns:
-        loss (tensor): computed iou loss.
-    """
-    if box_mode == "ltrb":
-        bboxes1 = torch.cat((-bboxes1[..., :2], bboxes1[..., 2:]), dim=-1)
-        bboxes2 = torch.cat((-bboxes2[..., :2], bboxes2[..., 2:]), dim=-1)
-    elif box_mode != "xyxy":
-        raise NotImplementedError
-
-    eps = torch.finfo(torch.float32).eps
-
-    bboxes1_area = (bboxes1[..., 2] - bboxes1[..., 0]).clamp_(min=0) \
-        * (bboxes1[..., 3] - bboxes1[..., 1]).clamp_(min=0)
-    bboxes2_area = (bboxes2[..., 2] - bboxes2[..., 0]).clamp_(min=0) \
-        * (bboxes2[..., 3] - bboxes2[..., 1]).clamp_(min=0)
-
-    w_intersect = (torch.min(bboxes1[..., 2], bboxes2[..., 2])
-                   - torch.max(bboxes1[..., 0], bboxes2[..., 0])).clamp_(min=0)
-    h_intersect = (torch.min(bboxes1[..., 3], bboxes2[..., 3])
-                   - torch.max(bboxes1[..., 1], bboxes2[..., 1])).clamp_(min=0)
-
-    area_intersect = w_intersect * h_intersect
-    area_union = bboxes2_area + bboxes1_area - area_intersect
-    ious = area_intersect / area_union.clamp(min=eps)
-
-    if iou_type == "iou":
-        return ious
-    elif iou_type == "giou":
-        g_w_intersect = torch.max(bboxes1[..., 2], bboxes2[..., 2]) \
-            - torch.min(bboxes1[..., 0], bboxes2[..., 0])
-        g_h_intersect = torch.max(bboxes1[..., 3], bboxes2[..., 3]) \
-            - torch.min(bboxes1[..., 1], bboxes2[..., 1])
-        ac_union = g_w_intersect * g_h_intersect
-        gious = ious - (ac_union - area_union) / ac_union.clamp(min=eps)
-        return gious
-    else:
-        raise NotImplementedError
-
-
-# ------------------------- For distributed -------------------------
-def is_dist_avail_and_initialized():
-    if not dist.is_available():
-        return False
-    if not dist.is_initialized():
-        return False
-    return True
-
-def get_world_size():
-    if not is_dist_avail_and_initialized():
-        return 1
-    return dist.get_world_size()
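
A quick numeric sanity check of the get_ious helper removed above (two invented xyxy boxes; the helper is assumed to be in scope):

    import torch

    boxes1 = torch.tensor([[0., 0., 2., 2.]])
    boxes2 = torch.tensor([[1., 1., 3., 3.]])

    # overlap is 1x1 = 1, union is 4 + 4 - 1 = 7, enclosing box [0, 0, 3, 3] has area 9
    print(get_ious(boxes1, boxes2, box_mode="xyxy", iou_type="iou"))   # ~0.143
    print(get_ious(boxes1, boxes2, box_mode="xyxy", iou_type="giou"))  # 1/7 - 2/9 ~ -0.079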

+ 0 - 164
models/detectors/vitdet/matcher.py

@@ -1,164 +0,0 @@
-# ------------------------------------------------------------------------------------------
-# This code referenced to https://github.com/open-mmlab/mmyolo/models/task_modules/assigners/batch_dsl_assigner.py
-# ------------------------------------------------------------------------------------------
-import torch
-import torch.nn.functional as F
-
-try:
-    from .loss_utils import box_iou
-except:
-    from  loss_utils import box_iou
-
-
-# -------------------------- Aligned SimOTA assigner --------------------------
-class AlignedSimOtaMatcher(object):
-    def __init__(self, num_classes, soft_center_radius=3.0, topk_candidates=13):
-        self.num_classes = num_classes
-        self.soft_center_radius = soft_center_radius
-        self.topk_candidates = topk_candidates
-
-    @torch.no_grad()
-    def __call__(self, 
-                 fpn_strides, 
-                 anchors, 
-                 pred_cls, 
-                 pred_box, 
-                 gt_labels,
-                 gt_bboxes):
-        # [M,]
-        strides = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
-                                for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
-        # List[F, M, 2] -> [M, 2]
-        num_gt = len(gt_labels)
-        anchors = torch.cat(anchors, dim=0)
-
-        # check gt
-        if num_gt == 0 or gt_bboxes.max().item() == 0.:
-            return {
-                'assigned_labels': gt_labels.new_full(pred_cls[..., 0].shape,
-                                                      self.num_classes,
-                                                      dtype=torch.long),
-                'assigned_bboxes': gt_bboxes.new_full(pred_box.shape, 0),
-                'assign_metrics': gt_bboxes.new_full(pred_cls[..., 0].shape, 0)
-            }
-        
-        # get inside points: [N, M]
-        is_in_gt = self.find_inside_points(gt_bboxes, anchors)
-        valid_mask = is_in_gt.sum(dim=0) > 0  # [M,]
-
-        # ----------------------------------- soft center prior -----------------------------------
-        gt_center = (gt_bboxes[..., :2] + gt_bboxes[..., 2:]) / 2.0
-        distance = (anchors.unsqueeze(0) - gt_center.unsqueeze(1)
-                    ).pow(2).sum(-1).sqrt() / strides.unsqueeze(0)  # [N, M]
-        distance = distance * valid_mask.unsqueeze(0)
-        soft_center_prior = torch.pow(10, distance - self.soft_center_radius)
-
-        # ----------------------------------- regression cost -----------------------------------
-        pair_wise_ious, _ = box_iou(gt_bboxes, pred_box)  # [N, M]
-        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8) * 3.0
-
-        # ----------------------------------- classification cost -----------------------------------
-        ## select the predicted scores corresponded to the gt_labels
-        pairwise_pred_scores = pred_cls.permute(1, 0)  # [M, C] -> [C, M]
-        pairwise_pred_scores = pairwise_pred_scores[gt_labels.long(), :].float()   # [N, M]
-        ## scale factor
-        scale_factor = (pair_wise_ious - pairwise_pred_scores.sigmoid()).abs().pow(2.0)
-        ## cls cost
-        pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
-            pairwise_pred_scores, pair_wise_ious,
-            reduction="none") * scale_factor # [N, M]
-            
-        del pairwise_pred_scores
-
-        ## foreground cost matrix
-        cost_matrix = pair_wise_cls_loss + pair_wise_ious_loss + soft_center_prior
-        max_pad_value = torch.ones_like(cost_matrix) * 1e9
-        cost_matrix = torch.where(valid_mask[None].repeat(num_gt, 1),   # [N, M]
-                                  cost_matrix, max_pad_value)
-
-        # ----------------------------------- dynamic label assignment -----------------------------------
-        matched_pred_ious, matched_gt_inds, fg_mask_inboxes = self.dynamic_k_matching(
-            cost_matrix, pair_wise_ious, num_gt)
-        del pair_wise_cls_loss, cost_matrix, pair_wise_ious, pair_wise_ious_loss
-
-        # -----------------------------------process assigned labels -----------------------------------
-        assigned_labels = gt_labels.new_full(pred_cls[..., 0].shape,
-                                             self.num_classes)  # [M,]
-        assigned_labels[fg_mask_inboxes] = gt_labels[matched_gt_inds].squeeze(-1)
-        assigned_labels = assigned_labels.long()  # [M,]
-
-        assigned_bboxes = gt_bboxes.new_full(pred_box.shape, 0)        # [M, 4]
-        assigned_bboxes[fg_mask_inboxes] = gt_bboxes[matched_gt_inds]  # [M, 4]
-
-        assign_metrics = gt_bboxes.new_full(pred_cls[..., 0].shape, 0) # [M,]
-        assign_metrics[fg_mask_inboxes] = matched_pred_ious            # [M,]
-
-        assigned_dict = dict(
-            assigned_labels=assigned_labels,
-            assigned_bboxes=assigned_bboxes,
-            assign_metrics=assign_metrics
-            )
-        
-        return assigned_dict
-
-    def find_inside_points(self, gt_bboxes, anchors):
-        """
-            gt_bboxes: Tensor -> [N, 4]
-            anchors:   Tensor -> [M, 2]
-        """
-        num_anchors = anchors.shape[0]
-        num_gt = gt_bboxes.shape[0]
-
-        anchors_expand = anchors.unsqueeze(0).repeat(num_gt, 1, 1)           # [N, M, 2]
-        gt_bboxes_expand = gt_bboxes.unsqueeze(1).repeat(1, num_anchors, 1)  # [N, M, 4]
-
-        # offset
-        lt = anchors_expand - gt_bboxes_expand[..., :2]
-        rb = gt_bboxes_expand[..., 2:] - anchors_expand
-        bbox_deltas = torch.cat([lt, rb], dim=-1)
-
-        is_in_gts = bbox_deltas.min(dim=-1).values > 0
-
-        return is_in_gts
-    
-    def dynamic_k_matching(self, cost_matrix, pairwise_ious, num_gt):
-        """Use IoU and matching cost to calculate the dynamic top-k positive
-        targets.
-
-        Args:
-            cost_matrix (Tensor): Cost matrix.
-            pairwise_ious (Tensor): Pairwise iou matrix.
-            num_gt (int): Number of gt.
-            valid_mask (Tensor): Mask for valid bboxes.
-        Returns:
-            tuple: matched ious and gt indexes.
-        """
-        matching_matrix = torch.zeros_like(cost_matrix, dtype=torch.uint8)
-        # select candidate topk ious for dynamic-k calculation
-        candidate_topk = min(self.topk_candidates, pairwise_ious.size(1))
-        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=1)
-        # calculate dynamic k for each gt
-        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
-
-        # sorting the batch cost matrix is faster than topk
-        _, sorted_indices = torch.sort(cost_matrix, dim=1)
-        for gt_idx in range(num_gt):
-            topk_ids = sorted_indices[gt_idx, :dynamic_ks[gt_idx]]
-            matching_matrix[gt_idx, :][topk_ids] = 1
-
-        del topk_ious, dynamic_ks, topk_ids
-
-        prior_match_gt_mask = matching_matrix.sum(0) > 1
-        if prior_match_gt_mask.sum() > 0:
-            cost_min, cost_argmin = torch.min(
-                cost_matrix[:, prior_match_gt_mask], dim=0)
-            matching_matrix[:, prior_match_gt_mask] *= 0
-            matching_matrix[cost_argmin, prior_match_gt_mask] = 1
-
-        # get foreground mask inside box and center prior
-        fg_mask_inboxes = matching_matrix.sum(0) > 0
-        matched_pred_ious = (matching_matrix *
-                             pairwise_ious).sum(0)[fg_mask_inboxes]
-        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
-
-        return matched_pred_ious, matched_gt_inds, fg_mask_inboxes
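
The dynamic_k_matching routine above derives each GT's number of positives from the sum of its top candidate IoUs. A minimal hedged sketch of just that step (IoU values invented):

    import torch

    # pairwise IoUs between 2 GT boxes and 5 candidate anchors (invented values)
    pairwise_ious = torch.tensor([[0.8, 0.7, 0.6, 0.2, 0.1],
                                  [0.2, 0.1, 0.1, 0.0, 0.0]])

    topk_ious, _ = torch.topk(pairwise_ious, k=3, dim=1)
    dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
    print(dynamic_ks)  # tensor([2, 1], dtype=torch.int32): top-3 sums 2.1 and 0.4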

+ 0 - 230
models/detectors/vitdet/vitdet.py

@@ -1,230 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-try:
-    from .vitdet_encoder import build_image_encoder
-    from .vitdet_decoder import build_decoder
-    from .vitdet_head    import build_predictor
-    from .basic_modules.basic import multiclass_nms
-except:
-    from  vitdet_encoder import build_image_encoder
-    from  vitdet_decoder import build_decoder
-    from  vitdet_head    import build_predictor
-    from  basic_modules.basic import multiclass_nms
-
-
-
-# Real-time ViT-based Object Detector
-class ViTDet(nn.Module):
-    def __init__(self,
-                 cfg,
-                 device,
-                 num_classes = 20,
-                 conf_thresh = 0.01,
-                 nms_thresh  = 0.5,
-                 topk        = 1000,
-                 trainable   = False,
-                 deploy      = False,
-                 no_multi_labels    = False,
-                 nms_class_agnostic = False,
-                 ):
-        super(ViTDet, self).__init__()
-        # ---------------------- Basic Parameters ----------------------
-        self.cfg = cfg
-        self.device = device
-        self.strides = cfg['stride']
-        self.num_classes = num_classes
-        ## Scale hidden channels by width_factor
-        cfg['hidden_dim'] = round(cfg['hidden_dim'] * cfg['width'])
-        cfg['pretrained'] = cfg['pretrained'] & trainable
-        ## Post-process parameters
-        self.conf_thresh = conf_thresh
-        self.nms_thresh = nms_thresh
-        self.topk = topk
-        self.deploy = deploy
-        self.no_multi_labels = no_multi_labels
-        self.nms_class_agnostic = nms_class_agnostic
-        
-        # ---------------------- Network Parameters ----------------------
-        ## ----------- Encoder -----------
-        self.encoder = build_image_encoder(cfg)
-
-        ## ----------- Decoder -----------
-        self.decoder = build_decoder(cfg, self.encoder.fpn_dims, num_levels=3)
-        
-        ## ----------- Preds -----------
-        self.predictor = build_predictor(cfg, self.strides, num_classes, 4, 3)
-
-    def post_process(self, cls_preds, box_preds):
-        """
-        Input:
-            cls_preds: List[np.array] -> [[M, C], ...]
-            box_preds: List[np.array] -> [[M, 4], ...]
-        Output:
-            bboxes: np.array -> [N, 4]
-            scores: np.array -> [N,]
-            labels: np.array -> [N,]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            if self.no_multi_labels:
-                # [M,]
-                scores, labels = torch.max(cls_pred_i.sigmoid(), dim=1)
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk, box_pred_i.size(0))
-
-                # topk candidates
-                predicted_prob, topk_idxs = scores.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                labels = labels[topk_idxs]
-                bboxes = box_pred_i[topk_idxs]
-            else:
-                # [M, C] -> [MC,]
-                scores_i = cls_pred_i.sigmoid().flatten()
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk, box_pred_i.size(0))
-
-                # torch.sort is actually faster than .topk (at least on GPUs)
-                predicted_prob, topk_idxs = scores_i.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-                labels = topk_idxs % self.num_classes
-
-                bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores, dim=0)
-        labels = torch.cat(all_labels, dim=0)
-        bboxes = torch.cat(all_bboxes, dim=0)
-
-        if not self.deploy:
-            # to cpu & numpy
-            scores = scores.cpu().numpy()
-            labels = labels.cpu().numpy()
-            bboxes = bboxes.cpu().numpy()
-
-            # nms
-            scores, labels, bboxes = multiclass_nms(
-                scores, labels, bboxes, self.nms_thresh, self.num_classes, self.nms_class_agnostic)
-
-        return bboxes, scores, labels
-    
-    def forward(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.encoder(x)
-
-        # ---------------- Heads ----------------
-        outputs = self.decoder(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.predictor(outputs['cls_feats'], outputs['reg_feats'])
-
-        if not self.training:
-            cls_pred = outputs["pred_cls"]
-            box_pred = outputs["pred_box"]
-            # post process
-            bboxes, scores, labels = self.post_process(cls_pred, box_pred)
-
-            outputs = {
-                "scores": scores,
-                "labels": labels,
-                "bboxes": bboxes
-            }
-                    
-        return outputs
-        
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    from loss import build_criterion
-
-    # Model config
-    cfg = {
-        'width': 1.0,
-        'depth': 1.0,
-        'out_stride': [8, 16, 32],
-        # Image Encoder - Backbone
-        'backbone': 'resnet18',
-        'backbone_norm': 'BN',
-        'res5_dilation': False,
-        'pretrained': True,
-        'pretrained_weight': 'imagenet1k_v1',
-        'freeze_at': 0,
-        'freeze_stem_only': False,
-        'out_stride': [8, 16, 32],
-        'max_stride': 32,
-        # Convolutional Decoder
-        'hidden_dim': 256,
-        'decoder': 'det_decoder',
-        'de_num_cls_layers': 2,
-        'de_num_reg_layers': 2,
-        'de_act': 'silu',
-        'de_norm': 'BN',
-        # Matcher
-        'matcher_hpy': {'soft_center_radius': 2.5,
-                        'topk_candidates': 13,},
-        # Loss
-        'use_vfl': True,
-        'loss_coeff': {'class': 1,
-                       'bbox': 1,
-                       'giou': 2,},
-        }
-    bs = 1
-    # Create a batch of images & targets
-    image = torch.randn(bs, 3, 640, 640).cuda()
-    targets = [{
-        'labels': torch.tensor([2, 4, 5, 8]).long().cuda(),
-        'boxes':  torch.tensor([[0, 0, 10, 10], [12, 23, 56, 70], [0, 10, 20, 30], [50, 60, 55, 150]]).float().cuda() / 640.
-    }] * bs
-
-    # Create model
-    model = ViTDet(cfg, num_classes=20)
-    model.train().cuda()
-
-    # Create criterion
-    criterion = build_criterion(cfg, num_classes=20)
-
-    # Model inference
-    t0 = time.time()
-    outputs = model(image, targets)
-    t1 = time.time()
-    print('Infer time: ', t1 - t0)
-
-    # Compute loss
-    loss = criterion(outputs, targets)
-    for k in loss.keys():
-        print("{} : {}".format(k, loss[k].item()))
-
-    print('==============================')
-    model.eval()
-    flops, params = profile(model, inputs=(image, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))
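
The multi-label branch of post_process above flattens class scores, takes the top-k, filters by confidence, and recovers anchor and class indices. A compact hedged illustration with made-up scores:

    import torch

    # flattened class scores for 4 anchors x 2 classes (made-up values)
    scores = torch.tensor([0.02, 0.90, 0.10, 0.75, 0.01, 0.03, 0.60, 0.05])
    num_classes, num_topk, conf_thresh = 2, 4, 0.5

    predicted_prob, topk_idxs = scores.sort(descending=True)
    topk_scores, topk_idxs = predicted_prob[:num_topk], topk_idxs[:num_topk]

    keep_idxs = topk_scores > conf_thresh
    topk_idxs = topk_idxs[keep_idxs]

    anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')
    labels = topk_idxs % num_classes
    print(anchor_idxs.tolist(), labels.tolist())  # [0, 1, 3] and [1, 1, 0]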

+ 0 - 187
models/detectors/vitdet/vitdet_decoder.py

@@ -1,187 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .basic_modules.basic import BasicConv
-except:
-    from  basic_modules.basic import BasicConv
-
-
-def build_decoder(cfg, in_dims, num_levels=3):
-    if cfg['decoder'] == "det_decoder":
-        decoder = MultiDetHead(cfg, in_dims, num_levels)
-    elif cfg['decoder'] == "seg_decoder":
-        decoder = MaskHead()
-    elif cfg['decoder'] == "pos_decoder":
-        decoder = PoseHead()
-
-    return decoder
-
-
-# ---------------------------- Detection Head ----------------------------
-## Single-level Detection Head
-class SingleDetHead(nn.Module):
-    def __init__(self,
-                 in_dim       :int  = 256,
-                 cls_head_dim :int  = 256,
-                 reg_head_dim :int  = 256,
-                 num_cls_head :int  = 2,
-                 num_reg_head :int  = 2,
-                 act_type     :str  = "silu",
-                 norm_type    :str  = "BN",
-                 ):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = cls_head_dim
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    BasicConv(in_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type, norm_type=norm_type)
-                              )
-            else:
-                cls_feats.append(
-                    BasicConv(self.cls_head_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type, norm_type=norm_type)
-                              )
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = reg_head_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    BasicConv(in_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type, norm_type=norm_type)
-                              )
-            else:
-                reg_feats.append(
-                    BasicConv(self.reg_head_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type, norm_type=norm_type)
-                              )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-## Multi-level Detection Head
-class MultiDetHead(nn.Module):
-    def __init__(self, cfg, in_dims, num_levels=3):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [SingleDetHead(in_dim       = in_dims[level],
-                           cls_head_dim = cfg['hidden_dim'],
-                           reg_head_dim = cfg['hidden_dim'],
-                           num_cls_head = cfg['de_num_cls_layers'],
-                           num_reg_head = cfg['de_num_reg_layers'],
-                           act_type     = cfg['de_act'],
-                           norm_type    = cfg['de_norm'],
-                           )
-                           for level in range(num_levels)
-                           ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.cls_head_dim = self.multi_level_heads[0].cls_head_dim
-        self.reg_head_dim = self.multi_level_heads[0].reg_head_dim
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        outputs = {
-            "cls_feat": cls_feats,
-            "reg_feat": reg_feats
-        }
-
-        return outputs
-
-
-# ---------------------------- Segmentation Head ----------------------------
-class MaskHead(nn.Module):
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-
-    def forward(self, x):
-        return
-
-
-# ---------------------------- Human-Pose Head ----------------------------
-class PoseHead(nn.Module):
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-
-    def forward(self, x):
-        return
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'width': 1.0,
-        'depth': 1.0,
-        # Decoder parameters
-        'hidden_dim': 256,
-        'decoder': 'det_decoder',
-        'de_num_cls_layers': 2,
-        'de_num_reg_layers': 2,
-        'de_act': 'silu',
-        'de_norm': 'BN',
-    }
-    fpn_dims = [256, 256, 256]
-    out_dim = 256
-    # Head-1
-    model = build_decoder(cfg, fpn_dims, num_levels=3)
-    print(model)
-    fpn_feats = [torch.randn(1, fpn_dims[0], 80, 80), torch.randn(1, fpn_dims[1], 40, 40), torch.randn(1, fpn_dims[2], 20, 20)]
-    t0 = time.time()
-    outputs = model(fpn_feats)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    # for out in outputs:
-    #     print(out.shape)
-
-    print('==============================')
-    flops, params = profile(model, inputs=(fpn_feats, ), verbose=False)
-    print('==============================')
-    print('Head-1: GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Head-1: Params : {:.2f} M'.format(params / 1e6))

+ 0 - 101
models/detectors/vitdet/vitdet_encoder.py

@@ -1,101 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .basic_modules.basic    import BasicConv, UpSampleWrapper
-    from .basic_modules.backbone import build_backbone
-except:
-    from  basic_modules.basic    import BasicConv, UpSampleWrapper
-    from  basic_modules.backbone import build_backbone
-
-
-# ----------------- Image Encoder -----------------
-def build_image_encoder(cfg):
-    return ImageEncoder(cfg)
-
-class ImageEncoder(nn.Module):
-    def __init__(self, cfg):
-        super().__init__()
-        # ---------------- Basic settings ----------------
-        ## Basic parameters
-        self.cfg = cfg
-        ## Network parameters
-        self.stride = 16
-        self.fpn_dims = [cfg['hidden_dim']] * 3
-        self.hidden_dim = cfg['hidden_dim']
-        
-        # ---------------- Network settings ----------------
-        ## Backbone Network
-        self.backbone, backbone_dim = build_backbone(cfg, cfg['pretrained'])
-
-        ## Input projection
-        self.input_proj = BasicConv(backbone_dim, cfg['hidden_dim'],
-                                    kernel_size=1,
-                                    act_type=None, norm_type='BN')
-
-        ## Upsample layer
-        self.upsample = UpSampleWrapper(cfg['hidden_dim'], 2.0)
-        
-        ## Downsample layer
-        self.downsample = BasicConv(cfg['hidden_dim'], cfg['hidden_dim'],
-                                    kernel_size=3, padding=1, stride=2,
-                                    act_type=None, norm_type='BN')
-
-        ## Output projection
-        self.output_projs = nn.ModuleList([BasicConv(cfg['hidden_dim'], cfg['hidden_dim'],
-                                                     kernel_size=3, padding=1,
-                                                     act_type='silu', norm_type='BN')
-                                           for _ in range(3)])
-
-
-    def forward(self, x):
-        # Backbone
-        feat = self.backbone(x)
-
-        # Input proj
-        feat = self.input_proj(feat)
-
-        # FPN
-        feat_up = self.upsample(feat)
-        feat_ds = self.downsample(feat)
-
-        # Multi level features: [P3, P4, P5]
-        pyramid_feats = [self.output_projs[0](feat_up),
-                         self.output_projs[1](feat),
-                         self.output_projs[2](feat_ds)]
-
-        return pyramid_feats
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    cfg = {
-        'width': 1.0,
-        'depth': 1.0,
-        'out_stride': 16,
-        'hidden_dim': 256,
-        # Image Encoder - Backbone
-        'backbone': 'resnet50',
-        'backbone_norm': 'FrozeBN',
-        'pretrained': True,
-        'freeze_at': 0,
-        'freeze_stem_only': False,
-    }
-    x = torch.rand(2, 3, 640, 640)
-    model = build_image_encoder(cfg)
-    model.train()
-
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    print('==============================')
-    model.eval()
-    x = torch.rand(1, 3, 640, 640)
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

+ 0 - 181
models/detectors/vitdet/vitdet_head.py

@@ -1,181 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-def build_predictor(cfg, strides, num_classes, num_coords=4, num_levels=3):
-    if cfg['task'] == 'detection':
-        pred_layer = MultiDetPredLayer(cls_dim     = cfg['hidden_dim'],
-                                       reg_dim     = cfg['hidden_dim'],
-                                       strides     = strides,
-                                       num_classes = num_classes,
-                                       num_coords  = num_coords,
-                                       num_levels  = num_levels
-                                       )
-        
-    elif cfg['task'] == 'segmentation':
-        raise NotImplementedError
-
-    elif cfg['task'] == 'pose_estimation':
-        raise NotImplementedError
-
-    return pred_layer
-
-
-# ---------------------------- Detection predictor ----------------------------
-## Single-level Detection Prediction Layer
-class SingleDetPDLayer(nn.Module):
-    def __init__(self,
-                 cls_dim     :int = 256,
-                 reg_dim     :int = 256,
-                 stride      :int = 32,
-                 num_classes :int = 80,
-                 num_coords  :int = 4):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.stride = stride
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-
-        # --------- Network Parameters ----------
-        self.cls_pred = nn.Conv2d(cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(reg_dim, num_coords, kernel_size=1)                
-
-        self.init_bias()
-        
-    def init_bias(self):
-        # cls pred bias
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(math.log(5 / self.num_classes / (640. / self.stride) ** 2))
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred bias
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-    def generate_anchors(self, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        # generate grid cells
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)])
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors += 0.5  # add center offset
-        anchors *= self.stride
-
-        return anchors
-        
-    def forward(self, cls_feat, reg_feat):
-        # pred
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        # generate anchor boxes: [M, 4]
-        B, _, H, W = cls_pred.size()
-        fmp_size = [H, W]
-        anchors = self.generate_anchors(fmp_size)
-        anchors = anchors.to(cls_pred.device)
-        # stride tensor: [M, 1]
-        stride_tensor = torch.ones_like(anchors[..., :1]) * self.stride
-        
-        # [B, C, H, W] -> [B, H, W, C] -> [B, M, C]
-        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4)
-
-        # ---------------- Decode bbox ----------------
-        ctr_pred = reg_pred[..., :2] * self.stride + anchors[..., :2]
-        wh_pred = torch.exp(reg_pred[..., 2:]) * self.stride
-        pred_x1y1 = ctr_pred - wh_pred * 0.5
-        pred_x2y2 = ctr_pred + wh_pred * 0.5
-        box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
-
-        # output dict
-        outputs = {"pred_cls": cls_pred,             # (Tensor) [B, M, C]
-                   "pred_reg": reg_pred,             # (Tensor) [B, M, 4]
-                   "pred_box": box_pred,             # (Tensor) [B, M, 4] 
-                   "anchors": anchors,               # (Tensor) [M, 2]
-                   "stride": self.stride,            # (Int)
-                   "stride_tensors": stride_tensor   # List(Tensor) [M, 1]
-                   }
-
-        return outputs
-
-# Multi-level pred layer
-class MultiDetPredLayer(nn.Module):
-    def __init__(self,
-                 cls_dim,
-                 reg_dim,
-                 strides,
-                 num_classes :int = 80,
-                 num_coords  :int = 4,
-                 num_levels  :int = 3):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.strides = strides
-        self.num_classes = num_classes
-        self.num_coords = num_coords
-        self.num_levels = num_levels
-
-        # ----------- Network Parameters -----------
-        ## multi-level pred layers
-        self.multi_level_preds = nn.ModuleList(
-            [SingleDetPDLayer(cls_dim     = cls_dim,
-                              reg_dim     = reg_dim,
-                              stride      = strides[level],
-                              num_classes = num_classes,
-                              num_coords  = num_coords)
-                              for level in range(num_levels)
-                              ])
-        
-    def forward(self, cls_feats, reg_feats):
-        all_anchors = []
-        all_strides = []
-        all_cls_preds = []
-        all_box_preds = []
-        all_reg_preds = []
-        for level in range(self.num_levels):
-            # ---------------- Single level prediction ----------------
-            outputs = self.multi_level_preds[level](cls_feats[level], reg_feats[level])
-
-            # collect results
-            all_cls_preds.append(outputs["pred_cls"])
-            all_box_preds.append(outputs["pred_box"])
-            all_reg_preds.append(outputs["pred_reg"])
-            all_anchors.append(outputs["anchors"])
-            all_strides.append(outputs["stride_tensors"])
-        
-        # output dict
-        outputs = {"pred_cls": all_cls_preds,      # List(Tensor) [B, M, C]
-                   "pred_box": all_box_preds,      # List(Tensor) [B, M, 4]
-                   "pred_reg": all_reg_preds,      # List(Tensor) [B, M, 4]
-                   "anchors": all_anchors,         # List(Tensor) [M, 2]
-                   "strides": self.strides,        # List(Int) [8, 16, 32]
-                   "stride_tensors": all_strides   # List(Tensor) [M, 1]
-                   }
-
-        return outputs
-    
-
-# -------------------- Segmentation predictor --------------------
-class MaskPDLayer(nn.Module):
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-    
-    def forward(self, x):
-        return
-
-
-# -------------------- Human-Pose predictor --------------------
-class PosePDLayer(nn.Module):
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-    
-    def forward(self, x):
-        return
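
For reference, the decode step in SingleDetPDLayer above turns a (dx, dy, log-w, log-h) regression into an xyxy box around its anchor. A self-contained sketch with invented numbers:

    import torch

    # hypothetical single prediction at stride 16, anchor center at (8., 8.)
    anchor = torch.tensor([8., 8.])
    reg_pred = torch.tensor([0.5, 0.25, 0.0, 0.0])   # (dx, dy, log-w, log-h)

    ctr = reg_pred[:2] * 16 + anchor                 # (16., 12.)
    wh  = torch.exp(reg_pred[2:]) * 16               # (16., 16.)
    box = torch.cat([ctr - 0.5 * wh, ctr + 0.5 * wh])
    print(box)  # tensor([ 8.,  4., 24., 20.])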