
Modify code

yjh0410 · 1 year ago · commit 08c2c7343d

+ 2 - 2
odlab/config/__init__.py

@@ -9,10 +9,10 @@ def build_config(args):
         return build_fcos_config(args)
     # YOLOF
     elif "yolof" in args.model:
-        return build_yolof_config
+        return build_yolof_config(args)
     # DETR
     elif "detr" in args.model:
-        return build_detr_config
+        return build_detr_config(args)
     
     else:
         raise NotImplementedError('Unknown Model: {}'.format(args.model))
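
The hunk above fixes the same slip twice: the config builder was returned instead of called. A minimal, self-contained sketch of the failure mode, with illustrative names:

```Python
# Returning the builder hands back a function object, not a config, so any
# attribute access on the "config" fails downstream.
class YolofConfig:
    max_epoch = 12

def build_yolof_config(args=None):
    return YolofConfig()

cfg = build_yolof_config          # bug: cfg is the function object itself
assert not hasattr(cfg, "max_epoch")

cfg = build_yolof_config(None)    # fix: cfg is a config instance
assert cfg.max_epoch == 12
```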

+ 2 - 2
odlab/config/fcos_config.py

@@ -54,7 +54,7 @@ class FcosBaseConfig(object):
                                                          [128, 256],
                                                          [256, 512],
                                                          [512, float('inf')]]
-                                                         },
+                                                         }
 
         # --------- Loss weight ---------
         self.focal_loss_alpha = 0.25
@@ -79,7 +79,7 @@ class FcosBaseConfig(object):
         self.warmup_factor = 0.00066667
 
         # --------- Train epoch ---------
-        self.max_epoch = 12,       # 1x
+        self.max_epoch = 12        # 1x
         self.lr_epoch  = [8, 11]   # 1x
 
         # --------- Data process ---------
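
These one-character fixes (the same change recurs several times in yolof_config.py below) remove a classic Python pitfall: a trailing comma turns the assignment into a one-element tuple. A minimal sketch:

```Python
# A trailing comma silently builds a one-element tuple, which then breaks
# comparisons and arithmetic on the value.
max_epoch = 12,            # old, buggy form
print(type(max_epoch))     # <class 'tuple'>
print(max_epoch == 12)     # False; it equals (12,)

max_epoch = 12             # fixed form
print(max_epoch + 1)       # 13; the tuple form would raise TypeError
```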

+ 5 - 5
odlab/config/yolof_config.py

@@ -29,18 +29,18 @@ class YolofBaseConfig(object):
         self.neck_norm = 'GN'
 
         # --------- Head ---------
-        self.head = 'yolof_head'
-        self.head_dim = 512
+        self.head         = 'yolof_head'
+        self.head_dim     = 512
         self.num_cls_head = 2
         self.num_reg_head = 4
         self.head_act     = 'relu'
         self.head_norm    = 'GN'
-        self.center_clamp = 32,        
+        self.center_clamp = 32
         self.anchor_size  = [[32, 32],
                              [64, 64],
                              [128, 128],
                              [256, 256],
-                             [512, 512]],
+                             [512, 512]]
 
         # --------- Post-process ---------
         self.train_topk = 1000
@@ -81,7 +81,7 @@ class YolofBaseConfig(object):
         self.warmup_factor = 0.00066667
 
         # --------- Train epoch ---------
-        self.max_epoch = 12,       # 1x
+        self.max_epoch = 12        # 1x
         self.lr_epoch  = [8, 11]   # 1x
 
         # --------- Data process ---------

+ 2 - 2
odlab/datasets/__init__.py

@@ -2,14 +2,14 @@
 import torch.utils.data
 from torch.utils.data import DataLoader, DistributedSampler
 
-from .coco import build_coco, coco_labels, coco_indexs
+from .coco import build_coco, coco_indexs
 from .transforms import build_transform
 
 
 def build_dataset(args, cfg, transform=None, is_train=False):
     if args.dataset == 'coco':
         dataset = build_coco(args, transform, is_train)
-        class_labels = coco_labels
+        class_labels = dataset.coco_labels
         num_classes  = 80
     cfg.class_labels = class_labels
     cfg.num_classes  = num_classes
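
A hedged sketch of the change above: class labels now come off the dataset instance instead of a module-level constant, which is why the `coco_labels` import can be dropped. The stand-in class below is illustrative only:

```Python
# Illustrative stand-in for the dataset returned by build_coco: it carries
# its own label list as the coco_labels attribute referenced in the diff.
class CocoDataset:
    def __init__(self, labels):
        self.coco_labels = labels

dataset = CocoDataset(labels=["person", "bicycle", "car"])
class_labels = dataset.coco_labels   # new form: labels travel with the dataset
print(class_labels)
```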

+ 1 - 1
odlab/datasets/transforms.py

@@ -301,7 +301,7 @@ class Compose(object):
 
 
 # build transforms
-def build_transform(cfg=None, is_train=False):
+def build_transform(cfg, is_train=False):
     # ---------------- Transform for Training ----------------
     if is_train:
         transforms = []

+ 2 - 2
odlab/engine.py

@@ -31,7 +31,7 @@ def train_one_epoch(cfg,
     epoch_size = len(data_loader)
     print_freq = 10
 
-    for iter_i, (samples, targets) in metric_logger.log_every(data_loader, print_freq, header):
+    for iter_i, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
         ni = iter_i + epoch * epoch_size
         # WarmUp
         if ni < cfg.warmup_iters:
@@ -51,7 +51,7 @@ def train_one_epoch(cfg,
             vis_data(images, targets, masks, cfg.class_labels, cfg.normalize_coords, cfg.box_format)
 
         # Inference
-        outputs = model(images, masks, targets)
+        outputs = model(images, masks)
 
         # Compute loss
         loss_dict = criterion(outputs, targets)
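
The first hunk is easiest to see with a toy generator standing in for MetricLogger.log_every, assumed here (following DETR-style training utilities) to yield batches rather than (index, batch) pairs:

```Python
# Toy stand-in for MetricLogger.log_every: it yields batches only, so the old
# "for iter_i, (samples, targets) in log_every(...)" unpacking raised a
# ValueError; wrapping it in enumerate() supplies the missing iteration index.
def log_every(iterable, print_freq, header):
    for obj in iterable:
        yield obj

data_loader = [("samples0", "targets0"), ("samples1", "targets1")]
for iter_i, (samples, targets) in enumerate(log_every(data_loader, 10, "Epoch: [0]")):
    print(iter_i, samples, targets)
```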

+ 40 - 48
odlab/models/basic/conv.py

@@ -1,15 +1,14 @@
 import torch.nn as nn
 
 
-def get_conv2d(c1, c2, k, p, s, d, g):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g)
+# --------------------- Basic modules ---------------------
+def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
+    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
 
     return conv
 
 def get_activation(act_type=None):
-    if act_type is None:
-        return nn.Identity()
-    elif act_type == 'relu':
+    if act_type == 'relu':
         return nn.ReLU(inplace=True)
     elif act_type == 'lrelu':
         return nn.LeakyReLU(0.1, inplace=True)
@@ -17,11 +16,11 @@ def get_activation(act_type=None):
         return nn.Mish(inplace=True)
     elif act_type == 'silu':
         return nn.SiLU(inplace=True)
-    elif act_type == 'gelu':
-        return nn.GELU()
+    elif act_type is None:
+        return nn.Identity()
     else:
-        raise NotImplementedError(act_type)
-
+        raise NotImplementedError
+        
 def get_norm(norm_type, dim):
     if norm_type == 'BN':
         return nn.BatchNorm2d(dim)
@@ -30,46 +29,39 @@ def get_norm(norm_type, dim):
     elif norm_type is None:
         return nn.Identity()
     else:
-        raise NotImplementedError(norm_type)
-
-
-# ----------------- CNN ops -----------------
-class ConvModule(nn.Module):
-    def __init__(self,
-                 c1,
-                 c2,
-                 k=1,
-                 p=0,
-                 s=1,
-                 d=1,
-                 act_type='relu',
-                 norm_type='BN', 
-                 depthwise=False):
-        super(ConvModule, self).__init__()
-        convs = []
-        if depthwise:
-            convs.append(get_conv2d(c1, c1, k=k, p=p, s=s, d=d, g=c1))
-            # depthwise conv
-            if norm_type:
-                convs.append(get_norm(norm_type, c1))
-            if act_type:
-                convs.append(get_activation(act_type))
-            # pointwise conv
-            convs.append(get_conv2d(c1, c2, k=1, p=0, s=1, d=d, g=1))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
+        raise NotImplementedError
 
+class BasicConv(nn.Module):
+    def __init__(self, 
+                 in_dim,                   # in channels
+                 out_dim,                  # out channels 
+                 kernel_size=1,            # kernel size 
+                 padding=0,                # padding
+                 stride=1,                 # stride
+                 dilation=1,               # dilation
+                 act_type  :str = 'lrelu', # activation
+                 norm_type :str = 'BN',    # normalization
+                 depthwise :bool = False
+                ):
+        super(BasicConv, self).__init__()
+        self.depthwise = depthwise
+        use_bias = False if norm_type is not None else True
+        if not depthwise:
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
+            self.norm = get_norm(norm_type, out_dim)
         else:
-            convs.append(get_conv2d(c1, c2, k=k, p=p, s=s, d=d, g=1))
-            if norm_type:
-                convs.append(get_norm(norm_type, c2))
-            if act_type:
-                convs.append(get_activation(act_type))
-            
-        self.convs = nn.Sequential(*convs)
-
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
+            self.norm1 = get_norm(norm_type, in_dim)
+            self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1, bias=use_bias)
+            self.norm2 = get_norm(norm_type, out_dim)
+        self.act  = get_activation(act_type)
 
     def forward(self, x):
-        return self.convs(x)
+        if not self.depthwise:
+            return self.act(self.norm(self.conv(x)))
+        else:
+            # Depthwise conv
+            x = self.act(self.norm1(self.conv1(x)))
+            # Pointwise conv
+            x = self.act(self.norm2(self.conv2(x)))
+            return x
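
A usage sketch for the refactored BasicConv, assuming the definitions above are importable; shapes and channel counts are illustrative:

```Python
import torch

# A plain 3x3 conv block and its depthwise-separable variant; both map
# 64 -> 128 channels at the same spatial resolution.
x = torch.randn(2, 64, 32, 32)
conv  = BasicConv(64, 128, kernel_size=3, padding=1, act_type='relu', norm_type='BN')
dwsep = BasicConv(64, 128, kernel_size=3, padding=1, depthwise=True)
print(conv(x).shape)    # torch.Size([2, 128, 32, 32])
print(dwsep(x).shape)   # torch.Size([2, 128, 32, 32])
```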

+ 0 - 4
odlab/models/detectors/__init__.py

@@ -3,7 +3,6 @@ import torch
 
 from .fcos.build  import build_fcos
 from .yolof.build import build_yolof
-from .detr.build  import build_detr
 
 
 def build_model(args, cfg, is_val=False):
@@ -14,9 +13,6 @@ def build_model(args, cfg, is_val=False):
     ## YOLOF    
     elif 'yolof' in args.model:
         model, criterion = build_yolof(cfg, is_val)
-    ## DETR    
-    elif 'detr' in args.model:
-        model, criterion = build_detr(cfg, is_val)
     else:
         raise NotImplementedError("Unknown detector: {}".format(args.model))
     

+ 0 - 57
odlab/models/detectors/detr/README.md

@@ -1,57 +0,0 @@
-# PlainDETR
-
-Our `PlainDETR-R50-1x` baseline on COCO-val:
-```Shell
-```
-
-## Results on COCO
-
-| Model           |  Scale     |  Pretrained  |  FPS  | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | Weight | Logs  |
-| --------------- | ---------- | ------------ | ----- | ---------------------- |  ---------------  | ------ | ----- |
-| PlainDETR-R50   |  800,1333  |   IN1K-Cls   |       |                        |                   |  |  |
-| PlainDETR-R50   |  800,1333  |   IN1K-MIM   |       |                        |                   |  |  |
-
-- We explore whether PlainDETR can still be powerful when using ResNet as the backbone.
-- We set up two comparative experiments, using the ResNet-50 pre-trained for the IN1K classification task and the ResNet-50 pre-trained by IN1K's MIM as the backbone of PlainDETR. Among them, we used the MIM pre-trained ResNet-50 provided by [SparK](https://github.com/keyu-tian/SparK).
-
-
-## Train PlainDETR
-### Single GPU
-Taking training **PlainDETR** on COCO as the example,
-```Shell
-python main.py --cuda -d coco --root path/to/coco -m plain_detr_r50 --batch_size 16 --eval_epoch 2
-```
-
-### Multi GPU
-Taking training **PlainDETR** on COCO as the example,
-```Shell
-python -m torch.distributed.run --nproc_per_node=8 train.py --cuda -dist -d coco --root path/to/coco -m plain_detr_r50 --batch_size 16 --eval_epoch 2 
-```
-
-## Test PlainDETR
-Taking testing **PlainDETR** on COCO-val as the example,
-```Shell
-python test.py --cuda -d coco --root path/to/coco -m plain_detr_r50 --weight path/to/plain_detr_r50.pth -vt 0.4 --show 
-```
-
-## Evaluate PlainDETR
-Taking evaluating **PlainDETR** on COCO-val as the example,
-```Shell
-python main.py --cuda -d coco --root path/to/coco -m plain_detr_r50 --resume path/to/plain_detr_r50.pth --eval_first
-```
-
-## Demo
-### Detect with Image
-```Shell
-python demo.py --mode image --path_to_img path/to/image_dirs/ --cuda -m plain_detr_r50 --weight path/to/weight -vt 0.4 --show
-```
-
-### Detect with Video
-```Shell
-python demo.py --mode video --path_to_vid path/to/video --cuda -m plain_detr_r50 --weight path/to/weight -vt 0.4 --show --gif
-```
-
-### Detect with Camera
-```Shell
-python demo.py --mode camera --cuda -m plain_detr_r50 --weight path/to/weight -vt 0.4 --show --gif
-```

+ 0 - 25
odlab/models/detectors/detr/build.py

@@ -1,25 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-
-from .criterion import build_criterion
-from .detr import DETR
-
-
-# build object detector
-def build_detr(cfg, num_classes=80, is_val=False):
-    # -------------- Build RT-DETR --------------
-    model = DETR(cfg         = cfg,
-                 num_classes = num_classes,
-                 conf_thresh = cfg['train_conf_thresh'] if is_val else cfg['test_conf_thresh'],
-                 nms_thresh  = cfg['train_nms_thresh']  if is_val else cfg['test_nms_thresh'],
-                 topk        = cfg['train_topk']        if is_val else cfg['test_topk'],
-                 use_nms     = False,
-                 )
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if is_val:
-        # build criterion for training
-        criterion = build_criterion(cfg, num_classes, aux_loss=True)
-        
-    return model, criterion

+ 0 - 212
odlab/models/detectors/detr/criterion.py

@@ -1,212 +0,0 @@
-import copy
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .matcher import HungarianMatcher
-
-from utils.misc import sigmoid_focal_loss
-from utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou, bbox2delta
-from utils.distributed_utils import is_dist_avail_and_initialized, get_world_size
-
-
-# build criterion
-def build_criterion(cfg, num_classes, aux_loss=True):
-    criterion = Criterion(cfg, num_classes, aux_loss)
-
-    return criterion
-    
-    
-class Criterion(nn.Module):
-    def __init__(self, cfg, num_classes=80, aux_loss=False):
-        super().__init__()
-        # ------------ Basic parameters ------------
-        self.cfg = cfg
-        self.num_classes = num_classes
-        self.k_one2many = cfg['k_one2many']
-        self.lambda_one2many = cfg['lambda_one2many']
-        self.aux_loss = aux_loss
-        self.losses = ['labels', 'boxes']
-        # ------------- Focal loss -------------
-        self.alpha = 0.25
-        self.gamma = 2.0
-        # ------------ Matcher ------------
-        self.matcher = HungarianMatcher(cost_class = cfg['matcher_hpy']['cost_class'],
-                                        cost_bbox  = cfg['matcher_hpy']['cost_bbox'],
-                                        cost_giou  = cfg['matcher_hpy']['cost_giou']
-                                        )
-        # ------------- Loss weight -------------
-        weight_dict = {'loss_cls':  cfg['loss_coeff']['class'],
-                       'loss_box':  cfg['loss_coeff']['bbox'],
-                       'loss_giou': cfg['loss_coeff']['giou']}
-        if aux_loss:
-            aux_weight_dict = {}
-            for i in range(cfg['de_num_layers'] - 1):
-                aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
-            aux_weight_dict.update({k + "_enc": v for k, v in weight_dict.items()})
-            weight_dict.update(aux_weight_dict)
-        new_dict = dict()
-        for key, value in weight_dict.items():
-            new_dict[key] = value
-            new_dict[key + "_one2many"] = value
-        self.weight_dict = new_dict
-
-
-    def _get_src_permutation_idx(self, indices):
-        # permute predictions following indices
-        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
-        src_idx = torch.cat([src for (src, _) in indices])
-        return batch_idx, src_idx
-
-    def _get_tgt_permutation_idx(self, indices):
-        # permute targets following indices
-        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
-        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
-        return batch_idx, tgt_idx
-
-    def loss_labels(self, outputs, targets, indices, num_boxes):
-        """Classification loss (NLL)
-        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
-        """
-        assert 'pred_logits' in outputs
-        src_logits = outputs['pred_logits']
-        # prepare class targets
-        idx = self._get_src_permutation_idx(indices)
-        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]).to(src_logits.device)
-        target_classes = torch.full(src_logits.shape[:2],
-                                    self.num_classes,
-                                    dtype=torch.int64,
-                                    device=src_logits.device)
-        target_classes[idx] = target_classes_o
-
-        # to one-hot labels
-        target_classes_onehot = torch.zeros([*src_logits.shape[:2], self.num_classes + 1],
-                                            dtype=src_logits.dtype,
-                                            layout=src_logits.layout,
-                                            device=src_logits.device)
-        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
-        target_classes_onehot = target_classes_onehot[..., :-1]
-
-        # focal loss
-        loss_cls = sigmoid_focal_loss(src_logits, target_classes_onehot, self.alpha, self.gamma)
-
-        losses = {}
-        losses['loss_cls'] = loss_cls.sum() / num_boxes
-
-        return losses
-
-    def loss_boxes(self, outputs, targets, indices, num_boxes):
-        """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
-           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
-           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
-        """
-        assert 'pred_boxes' in outputs
-        # prepare bbox targets
-        idx = self._get_src_permutation_idx(indices)
-        src_boxes = outputs['pred_boxes'][idx]
-        target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0).to(src_boxes.device)
-        
-        # compute L1 loss
-        src_deltas = outputs["pred_deltas"][idx]
-        src_boxes_old = outputs["pred_boxes_old"][idx]
-        target_deltas = bbox2delta(src_boxes_old, target_boxes)
-        loss_bbox = F.l1_loss(src_deltas, target_deltas, reduction="none")
-
-        # compute GIoU loss
-        bbox_giou = generalized_box_iou(box_cxcywh_to_xyxy(src_boxes),
-                                        box_cxcywh_to_xyxy(target_boxes))
-        loss_giou = 1 - torch.diag(bbox_giou)
-        
-        losses = {}
-        losses['loss_box'] = loss_bbox.sum() / num_boxes
-        losses['loss_giou'] = loss_giou.sum() / num_boxes
-
-        return losses
-
-    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
-        loss_map = {
-            'labels': self.loss_labels,
-            'boxes': self.loss_boxes,
-        }
-        assert loss in loss_map, f'do you really want to compute {loss} loss?'
-        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
-
-    def compute_loss(self, outputs, targets):
-        """ This performs the loss computation.
-        Parameters:
-             outputs: dict of tensors, see the output specification of the model for the format
-             targets: list of dicts, such that len(targets) == batch_size.
-                      The expected keys in each dict depends on the losses applied, see each loss' doc
-        """
-        outputs_without_aux = {
-            k: v
-            for k, v in outputs.items()
-            if k != "aux_outputs" and k != "enc_outputs"
-        }
-
-        # Retrieve the matching between the outputs of the last layer and the targets
-        indices = self.matcher(outputs_without_aux, targets)
-
-        # Compute the average number of target boxes accross all nodes, for normalization purposes
-        num_boxes = sum(len(t["labels"]) for t in targets)
-        num_boxes = torch.as_tensor(
-            [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device
-        )
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_boxes)
-        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
-
-        # Compute all the requested losses
-        losses = {}
-        for loss in self.losses:
-            kwargs = {}
-            l_dict = self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs)
-            losses.update(l_dict)
-
-        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
-        if "aux_outputs" in outputs:
-            for i, aux_outputs in enumerate(outputs["aux_outputs"]):
-                indices = self.matcher(aux_outputs, targets)
-                for loss in self.losses:
-                    kwargs = {}
-                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
-                    l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
-                    losses.update(l_dict)
-
-        if "enc_outputs" in outputs:
-            enc_outputs = outputs["enc_outputs"]
-            bin_targets = copy.deepcopy(targets)
-            for bt in bin_targets:
-                bt["labels"] = torch.zeros_like(bt["labels"])
-            indices = self.matcher(enc_outputs, bin_targets)
-            for loss in self.losses:
-                kwargs = {}
-                l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs)
-                l_dict = {k + "_enc": v for k, v in l_dict.items()}
-                losses.update(l_dict)
-
-        return losses
-
-    def forward(self, outputs, targets):
-        # --------------------- One-to-one losses ---------------------
-        outputs_one2one = {k: v for k, v in outputs.items() if "one2many" not in k}
-        loss_dict = self.compute_loss(outputs_one2one, targets)
-
-        # --------------------- One-to-many losses ---------------------
-        outputs_one2many = {k[:-9]: v for k, v in outputs.items() if "one2many" in k}
-        if len(outputs_one2many) > 0:
-            # Copy targets
-            multi_targets = copy.deepcopy(targets)
-            for target in multi_targets:
-                target["boxes"] = target["boxes"].repeat(self.k_one2many, 1)
-                target["labels"] = target["labels"].repeat(self.k_one2many)
-            # Compute one-to-many losses
-            one2many_loss_dict = self.compute_loss(outputs_one2many, multi_targets)
-            # add one2many losses in to the final loss_dict
-            for k, v in one2many_loss_dict.items():
-                if k + "_one2many" in loss_dict.keys():
-                    loss_dict[k + "_one2many"] += v * self.lambda_one2many
-                else:
-                    loss_dict[k + "_one2many"] = v * self.lambda_one2many
-
-        return loss_dict

+ 0 - 347
odlab/models/detectors/detr/detr.py

@@ -1,347 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-
-from ...backbone          import build_backbone
-from ...basic.mlp         import MLP
-from ...basic.conv        import BasicConv, UpSampleWrapper
-from ...basic.transformer import TransformerEncoder, PlainDETRTransformer, get_clones
-
-from utils.misc import multiclass_nms
-
-
-# DETR
-class DETR(nn.Module):
-    def __init__(self,
-                 cfg,
-                 num_classes = 80,
-                 conf_thresh = 0.1,
-                 nms_thresh  = 0.5,
-                 topk        = 300,
-                 use_nms     = False,
-                 ca_nms      = False,
-                 ):
-        super().__init__()
-        # ---------------- Basic setting ----------------
-        self.stride = cfg['out_stride']
-        self.upsample_factor = cfg['max_stride'] // cfg['out_stride']
-        self.num_classes = num_classes
-        ## Transformer parameters
-        self.num_queries_one2one = cfg['num_queries_one2one']
-        self.num_queries_one2many = cfg['num_queries_one2many']
-        self.num_queries = self.num_queries_one2one + self.num_queries_one2many
-        ## Post-process parameters
-        self.ca_nms = ca_nms
-        self.use_nms = use_nms
-        self.num_topk = topk
-        self.nms_thresh = nms_thresh
-        self.conf_thresh = conf_thresh
-
-        # ---------------- Network setting ----------------
-        ## Backbone Network
-        self.backbone, feat_dims = build_backbone(cfg)
-
-        ## Input projection
-        self.input_proj = BasicConv(feat_dims[-1], cfg['hidden_dim'], kernel_size=1, act_type=None, norm_type='GN')
-
-        ## Transformer Encoder
-        self.transformer_encoder = TransformerEncoder(d_model    = cfg['hidden_dim'],
-                                                      num_heads  = cfg['en_num_heads'],
-                                                      num_layers = cfg['en_num_layers'],
-                                                      ffn_dim    = cfg['en_ffn_dim'],
-                                                      dropout    = cfg['en_dropout'],
-                                                      act_type   = cfg['en_act'],
-                                                      pre_norm   = cfg['en_pre_norm'],
-                                                      )
-
-        ## Upsample layer
-        self.upsample = UpSampleWrapper(cfg['hidden_dim'], self.upsample_factor)
-        
-        ## Output projection
-        self.output_proj = BasicConv(cfg['hidden_dim'], cfg['hidden_dim'], kernel_size=3, padding=1, act_type='silu', norm_type='BN')
-        
-        ## Transformer
-        self.query_embed = nn.Embedding(self.num_queries, cfg['hidden_dim'])
-        self.transformer = PlainDETRTransformer(d_model             = cfg['hidden_dim'],
-                                                num_heads           = cfg['de_num_heads'],
-                                                ffn_dim             = cfg['de_ffn_dim'],
-                                                dropout             = cfg['de_dropout'],
-                                                act_type            = cfg['de_act'],
-                                                pre_norm            = cfg['de_pre_norm'],
-                                                rpe_hidden_dim      = cfg['rpe_hidden_dim'],
-                                                feature_stride      = cfg['out_stride'],
-                                                num_layers          = cfg['de_num_layers'],
-                                                use_checkpoint      = cfg['use_checkpoint'],
-                                                num_queries_one2one = cfg['num_queries_one2one'],
-                                                num_queries_one2many    = cfg['num_queries_one2many'],
-                                                proposal_feature_levels = cfg['proposal_feature_levels'],
-                                                proposal_in_stride      = cfg['out_stride'],
-                                                proposal_tgt_strides    = cfg['proposal_tgt_strides'],
-                                                return_intermediate = True,
-                                                )
-    
-        ## Detect Head
-        class_embed = nn.Linear(cfg['hidden_dim'], num_classes)
-        bbox_embed = MLP(cfg['hidden_dim'], cfg['hidden_dim'], 4, 3)
-
-        prior_prob = 0.01
-        bias_value = -math.log((1 - prior_prob) / prior_prob)
-        class_embed.bias.data = torch.ones(num_classes) * bias_value
-        nn.init.constant_(bbox_embed.layers[-1].weight.data, 0)
-        nn.init.constant_(bbox_embed.layers[-1].bias.data, 0)
-
-        self.class_embed = get_clones(class_embed, cfg['de_num_layers'] + 1)
-        self.bbox_embed  = get_clones(bbox_embed, cfg['de_num_layers'] + 1)
-        nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
-
-        self.transformer.decoder.bbox_embed = self.bbox_embed
-        self.transformer.decoder.class_embed = self.class_embed
-
-    def get_posembed(self, d_model, mask, temperature=10000, normalize=False):
-        not_mask = ~mask
-        scale = 2 * torch.pi
-        num_pos_feats = d_model // 2
-
-        # -------------- Generate XY coords --------------
-        ## [B, H, W]
-        y_embed = not_mask.cumsum(1, dtype=torch.float32)
-        x_embed = not_mask.cumsum(2, dtype=torch.float32)
-        ## Normalize coords
-        if normalize:
-            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + 1e-6)
-            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + 1e-6)
-        else:
-            y_embed = y_embed - 0.5
-            x_embed = x_embed - 0.5
-        # [H, W] -> [B, H, W, 2]
-        pos = torch.stack([x_embed, y_embed], dim=-1)
-
-        # -------------- Sine-PosEmbedding --------------
-        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
-        dim_t_ = torch.div(dim_t, 2, rounding_mode='floor') / num_pos_feats
-        dim_t = temperature ** (2 * dim_t_)
-
-        x_embed = pos[..., 0] * scale
-        y_embed = pos[..., 1] * scale
-        pos_x = x_embed[..., None] / dim_t
-        pos_y = y_embed[..., None] / dim_t
-        pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
-        pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
-        pos_embed = torch.cat((pos_y, pos_x), dim=-1)
-        
-        # [B, H, W, C] -> [B, C, H, W]
-        pos_embed = pos_embed.permute(0, 3, 1, 2)
-        
-        return pos_embed
-
-    def post_process(self, box_pred, cls_pred):
-        # Top-k select
-        cls_pred = cls_pred[0].flatten().sigmoid_()
-        box_pred = box_pred[0]
-
-        # Keep top k top scoring indices only.
-        num_topk = min(self.num_topk, box_pred.size(0))
-
-        # Topk candidates
-        predicted_prob, topk_idxs = cls_pred.sort(descending=True)
-        topk_scores = predicted_prob[:num_topk]
-        topk_idxs = topk_idxs[:self.num_topk]
-
-        # Filter out the proposals with low confidence score
-        keep_idxs = topk_scores > self.conf_thresh
-        topk_scores = topk_scores[keep_idxs]
-        topk_idxs = topk_idxs[keep_idxs]
-        topk_box_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-
-        ## Top-k results
-        topk_labels = topk_idxs % self.num_classes
-        topk_bboxes = box_pred[topk_box_idxs]
-
-        topk_scores = topk_scores.cpu().numpy()
-        topk_labels = topk_labels.cpu().numpy()
-        topk_bboxes = topk_bboxes.cpu().numpy()
-
-        # nms
-        if self.use_nms:
-            topk_scores, topk_labels, topk_bboxes = multiclass_nms(
-                topk_scores, topk_labels, topk_bboxes, self.nms_thresh, self.num_classes, self.nms_class_agnostic)
-
-        return topk_bboxes, topk_scores, topk_labels
-
-    def resize_mask(self, src, mask=None):
-        bs, c, h, w = src.shape
-        if mask is not None:
-            # [B, H, W]
-            mask = nn.functional.interpolate(mask[None].float(), size=[h, w]).bool()[0]
-        else:
-            mask = torch.zeros([bs, h, w], device=src.device, dtype=torch.bool)
-
-        return mask
-    
-    @torch.jit.unused
-    def _set_aux_loss(self, outputs_class, outputs_coord, outputs_coord_old, outputs_deltas):
-        # this is a workaround to make torchscript happy, as torchscript
-        # doesn't support dictionary with non-homogeneous values, such
-        # as a dict having both a Tensor and a list.
-        return [
-            {"pred_logits": a, "pred_boxes": b, "pred_boxes_old": c, "pred_deltas": d, }
-            for a, b, c, d in zip(outputs_class[:-1], outputs_coord[:-1], outputs_coord_old[:-1], outputs_deltas[:-1])
-        ]
-
-    def inference_single_image(self, x):
-        # ----------- Image Encoder -----------
-        pyramid_feats = self.backbone(x)
-        src = self.input_proj(pyramid_feats[-1])
-        src = self.transformer_encoder(src)
-        src = self.upsample(src)
-        src = self.output_proj(src)
-
-        # ----------- Prepare inputs for Transformer -----------
-        mask = self.resize_mask(src)
-        pos_embed = self.get_posembed(src.shape[1], mask, normalize=False)
-        query_embeds = self.query_embed.weight[:self.num_queries_one2one]
-        self_attn_mask = None
-
-        # -----------Transformer -----------
-        (
-            hs,
-            init_reference,
-            inter_references,
-            _,
-            _,
-            _,
-            _,
-            max_shape
-        ) = self.transformer(src, mask, pos_embed, query_embeds, self_attn_mask)
-
-        # ----------- Process outputs -----------
-        outputs_classes_one2one = []
-        outputs_coords_one2one = []
-        outputs_deltas_one2one = []
-
-        for lid in range(hs.shape[0]):
-            if lid == 0:
-                reference = init_reference
-            else:
-                reference = inter_references[lid - 1]
-            outputs_class = self.class_embed[lid](hs[lid])
-            tmp = self.bbox_embed[lid](hs[lid])
-            outputs_coord = self.transformer.decoder.delta2bbox(reference, tmp, max_shape)  # xyxy
-
-            outputs_classes_one2one.append(outputs_class[:, :self.num_queries_one2one])
-            outputs_coords_one2one.append(outputs_coord[:, :self.num_queries_one2one])
-            outputs_deltas_one2one.append(tmp[:, :self.num_queries_one2one])
-
-        outputs_classes_one2one = torch.stack(outputs_classes_one2one)
-        outputs_coords_one2one = torch.stack(outputs_coords_one2one)
-
-        # ------------ Post process ------------
-        cls_pred = outputs_classes_one2one[-1]
-        box_pred = outputs_coords_one2one[-1]
-        
-        # post-process
-        bboxes, scores, labels = self.post_process(box_pred, cls_pred)
-        # normalize bbox
-        bboxes[..., 0::2] /= x.shape[-1]
-        bboxes[..., 1::2] /= x.shape[-2]
-        bboxes = bboxes.clip(0., 1.)
-
-        return bboxes, scores, labels
-        
-    def forward(self, x, src_mask=None, targets=None):
-        if not self.training:
-            return self.inference_single_image(x)
-
-        # ----------- Image Encoder -----------
-        pyramid_feats = self.backbone(x)
-        src = self.input_proj(pyramid_feats[-1])
-        src = self.transformer_encoder(src)
-        src = self.upsample(src)
-        src = self.output_proj(src)
-
-        # ----------- Prepare inputs for Transformer -----------
-        mask = self.resize_mask(src, src_mask)
-        pos_embed = self.get_posembed(src.shape[1], mask, normalize=False)
-        query_embeds = self.query_embed.weight
-        self_attn_mask = torch.zeros(
-            [self.num_queries, self.num_queries, ]).bool().to(src.device)
-        self_attn_mask[self.num_queries_one2one:, 0: self.num_queries_one2one, ] = True
-        self_attn_mask[0: self.num_queries_one2one, self.num_queries_one2one:, ] = True
-
-        # -----------Transformer -----------
-        (
-            hs,
-            init_reference,
-            inter_references,
-            enc_outputs_class,
-            enc_outputs_coord_unact,
-            enc_outputs_delta,
-            output_proposals,
-            max_shape
-        ) = self.transformer(src, mask, pos_embed, query_embeds, self_attn_mask)
-
-        # ----------- Process outputs -----------
-        outputs_classes_one2one = []
-        outputs_coords_one2one = []
-        outputs_coords_old_one2one = []
-        outputs_deltas_one2one = []
-
-        outputs_classes_one2many = []
-        outputs_coords_one2many = []
-        outputs_coords_old_one2many = []
-        outputs_deltas_one2many = []
-
-        for lid in range(hs.shape[0]):
-            if lid == 0:
-                reference = init_reference
-            else:
-                reference = inter_references[lid - 1]
-            outputs_class = self.class_embed[lid](hs[lid])
-            tmp = self.bbox_embed[lid](hs[lid])
-            outputs_coord = self.transformer.decoder.box_xyxy_to_cxcywh(
-                self.transformer.decoder.delta2bbox(reference, tmp, max_shape))
-
-            outputs_classes_one2one.append(outputs_class[:, 0: self.num_queries_one2one])
-            outputs_classes_one2many.append(outputs_class[:, self.num_queries_one2one:])
-
-            outputs_coords_one2one.append(outputs_coord[:, 0: self.num_queries_one2one])
-            outputs_coords_one2many.append(outputs_coord[:, self.num_queries_one2one:])
-
-            outputs_coords_old_one2one.append(reference[:, :self.num_queries_one2one])
-            outputs_coords_old_one2many.append(reference[:, self.num_queries_one2one:])
-            outputs_deltas_one2one.append(tmp[:, :self.num_queries_one2one])
-            outputs_deltas_one2many.append(tmp[:, self.num_queries_one2one:])
-
-        outputs_classes_one2one = torch.stack(outputs_classes_one2one)
-        outputs_coords_one2one = torch.stack(outputs_coords_one2one)
-
-        outputs_classes_one2many = torch.stack(outputs_classes_one2many)
-        outputs_coords_one2many = torch.stack(outputs_coords_one2many)
-
-        out = {
-            "pred_logits": outputs_classes_one2one[-1],
-            "pred_boxes": outputs_coords_one2one[-1],
-            "pred_logits_one2many": outputs_classes_one2many[-1],
-            "pred_boxes_one2many": outputs_coords_one2many[-1],
-
-            "pred_boxes_old": outputs_coords_old_one2one[-1],
-            "pred_deltas": outputs_deltas_one2one[-1],
-            "pred_boxes_old_one2many": outputs_coords_old_one2many[-1],
-            "pred_deltas_one2many": outputs_deltas_one2many[-1],
-        }
-
-        out["aux_outputs"] = self._set_aux_loss(
-            outputs_classes_one2one, outputs_coords_one2one, outputs_coords_old_one2one, outputs_deltas_one2one
-        )
-        out["aux_outputs_one2many"] = self._set_aux_loss(
-            outputs_classes_one2many, outputs_coords_one2many, outputs_coords_old_one2many, outputs_deltas_one2many
-        )
-
-        out["enc_outputs"] = {
-            "pred_logits": enc_outputs_class,
-            "pred_boxes": enc_outputs_coord_unact,
-            "pred_boxes_old": output_proposals,
-            "pred_deltas": enc_outputs_delta,
-        }
-
-        return out

+ 0 - 99
odlab/models/detectors/detr/matcher.py

@@ -1,99 +0,0 @@
-# ------------------------------------------------------------------------
-# Plain-DETR
-# Copyright (c) 2023 Xi'an Jiaotong University & Microsoft Research Asia.
-# Licensed under The MIT License [see LICENSE for details]
-# ------------------------------------------------------------------------
-# Deformable DETR
-# Copyright (c) 2020 SenseTime. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-# Modified from DETR (https://github.com/facebookresearch/detr)
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-# ------------------------------------------------------------------------
-
-"""
-Modules to compute the matching cost and solve the corresponding LSAP.
-"""
-import torch
-from scipy.optimize import linear_sum_assignment
-from torch import nn
-
-from utils.box_ops import box_cxcywh_to_xyxy, generalized_box_iou, bbox2delta
-
-
-class HungarianMatcher(nn.Module):
-    def __init__(self,
-                 cost_class: float = 1,
-                 cost_bbox:  float = 1,
-                 cost_giou:  float = 1,
-                 ):
-        super().__init__()
-        self.cost_class = cost_class
-        self.cost_bbox = cost_bbox
-        self.cost_giou = cost_giou
-        assert (
-            cost_class != 0 or cost_bbox != 0 or cost_giou != 0
-        ), "all costs cant be 0"
-
-    def forward(self, outputs, targets):
-        """ Performs the matching
-
-        Params:
-            outputs: This is a dict that contains at least these entries:
-                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
-                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
-
-            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
-                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
-                           objects in the target) containing the class labels
-                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
-
-        Returns:
-            A list of size batch_size, containing tuples of (index_i, index_j) where:
-                - index_i is the indices of the selected predictions (in order)
-                - index_j is the indices of the corresponding selected targets (in order)
-            For each batch element, it holds:
-                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
-        """
-        with torch.no_grad():
-            bs, num_queries = outputs["pred_logits"].shape[:2]
-
-            # We flatten to compute the cost matrices in a batch
-            out_prob = outputs["pred_logits"].flatten(0, 1).sigmoid()
-            out_bbox = outputs["pred_boxes"].flatten(0, 1)
-
-            # Also concat the target labels and boxes
-            tgt_ids = torch.cat([v["labels"] for v in targets]).to(out_prob.device)
-            tgt_bbox = torch.cat([v["boxes"] for v in targets]).to(out_prob.device)
-
-            # Compute the classification cost.
-            alpha = 0.25
-            gamma = 2.0
-            neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
-            pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
-            cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]
-
-            # Compute the L1 cost between boxes
-            out_delta = outputs["pred_deltas"].flatten(0, 1)
-            out_bbox_old = outputs["pred_boxes_old"].flatten(0, 1)
-            tgt_delta = bbox2delta(out_bbox_old, tgt_bbox)
-            cost_bbox = torch.cdist(out_delta[:, None], tgt_delta, p=1).squeeze(1)
-
-            # Compute the giou cost betwen boxes
-            cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox),
-                                             box_cxcywh_to_xyxy(tgt_bbox)
-            )
-
-            # Final cost matrix
-            C = self.cost_bbox  * cost_bbox + \
-                self.cost_class * cost_class + \
-                self.cost_giou  * cost_giou
-            C = C.view(bs, num_queries, -1).cpu()
-
-            sizes = [len(v["boxes"]) for v in targets]
-            indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
-            
-            return [(torch.as_tensor(i, dtype=torch.int64),  # batch index
-                     torch.as_tensor(j, dtype=torch.int64))  # query index
-                     for i, j in indices]
-        

+ 2 - 2
odlab/models/detectors/fcos/fcos.py

@@ -33,10 +33,10 @@ class FCOS(nn.Module):
         self.backbone, feat_dims = build_backbone(cfg)
 
         ## Neck
-        self.fpn = build_neck(cfg, feat_dims, cfg['head_dim'])
+        self.fpn = build_neck(cfg, feat_dims, cfg.head_dim)
         
         ## Heads
-        self.head = build_head(cfg, cfg['head_dim'], cfg['head_dim'], num_classes)
+        self.head = build_head(cfg, cfg.head_dim, cfg.head_dim)
 
     def post_process(self, cls_preds, ctn_preds, box_preds):
         """

+ 2 - 2
odlab/models/detectors/yolof/yolof.py

@@ -33,10 +33,10 @@ class YOLOF(nn.Module):
         self.backbone, feat_dims = build_backbone(cfg)
 
         ## Neck
-        self.neck = build_neck(cfg, feat_dims[-1], cfg['head_dim'])
+        self.neck = build_neck(cfg, feat_dims[-1], cfg.head_dim)
         
         ## Heads
-        self.head = build_head(cfg, cfg['head_dim'], cfg['head_dim'], num_classes)
+        self.head = build_head(cfg, cfg.head_dim, cfg.head_dim)
 
     def post_process(self, cls_pred, box_pred):
         """

+ 3 - 19
odlab/models/head/__init__.py

@@ -3,29 +3,13 @@ from .fcos_head      import FcosHead
 
 
 # build head
-def build_head(cfg, in_dim, out_dim, num_classes):
+def build_head(cfg, in_dim, out_dim):
     print('==============================')
     print('Head: {}'.format(cfg.head))
     
     if cfg.head == 'fcos_head':
-        model = FcosHead(cfg          = cfg,
-                         in_dim       = in_dim,
-                         out_dim      = out_dim,
-                         num_classes  = num_classes,
-                         num_cls_head = cfg.num_cls_head,
-                         num_reg_head = cfg.num_reg_head,
-                         act_type     = cfg.head_act,
-                         norm_type    = cfg.head_norm,
-                         )
+        model = FcosHead(cfg, in_dim, out_dim)
     elif cfg.head == 'yolof_head':
-        model = YolofHead(cfg          = cfg,
-                          in_dim       = in_dim,
-                          out_dim      = out_dim,
-                          num_classes  = num_classes,
-                          num_cls_head = cfg.num_cls_head,
-                          num_reg_head = cfg.num_reg_head,
-                          act_type     = cfg.head_act,
-                          norm_type    = cfg.head_norm,
-                          )
+        model = YolofHead(cfg, in_dim, out_dim)
 
     return model
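
build_head now reads every head hyperparameter off cfg rather than the argument list. A hedged sketch of the new contract, with attribute values mirroring YolofBaseConfig above; out_stride is an assumption here, since YolofHead reads cfg.out_stride:

```Python
# Minimal config object satisfying the new build_head contract; num_classes
# is normally filled in by build_dataset, and out_stride is assumed here.
class Cfg:
    head         = 'yolof_head'
    num_classes  = 80
    num_cls_head = 2
    num_reg_head = 4
    head_act     = 'relu'
    head_norm    = 'GN'
    out_stride   = 32
    center_clamp = 32
    anchor_size  = [[32, 32], [64, 64], [128, 128], [256, 256], [512, 512]]

head = build_head(Cfg(), in_dim=512, out_dim=512)
```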

+ 29 - 28
odlab/models/head/fcos_head.py

@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 
-from ..basic.conv import ConvModule
+from ..basic.conv import BasicConv
 
 
 class Scale(nn.Module):
@@ -24,20 +24,19 @@ class Scale(nn.Module):
         """
         return x * self.scale
 
-
 class FcosHead(nn.Module):
-    def __init__(self, cfg, in_dim, out_dim, num_classes, num_cls_head=1, num_reg_head=1, act_type='relu', norm_type='BN'):
+    def __init__(self, cfg, in_dim, out_dim):
         super().__init__()
         self.fmp_size = None
         # ------------------ Basic parameters -------------------
         self.cfg = cfg
         self.in_dim = in_dim
-        self.num_classes = num_classes
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.stride = cfg.out_stride
+        self.stride       = cfg.out_stride
+        self.num_classes  = cfg.num_classes
+        self.num_cls_head = cfg.num_cls_head
+        self.num_reg_head = cfg.num_reg_head
+        self.act_type     = cfg.head_act
+        self.norm_type    = cfg.head_norm
 
         # ------------------ Network parameters -------------------
         ## cls head
@@ -46,16 +45,16 @@ class FcosHead(nn.Module):
         for i in range(self.num_cls_head):
             if i == 0:
                 cls_heads.append(
-                    ConvModule(in_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                               act_type=self.act_type,
-                               norm_type=self.norm_type)
-                               )
+                    BasicConv(in_dim, self.cls_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type=self.act_type, norm_type=self.norm_type)
+                              )
             else:
                 cls_heads.append(
-                    ConvModule(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                               act_type=self.act_type,
-                               norm_type=self.norm_type)
-                               )
+                    BasicConv(self.cls_head_dim, self.cls_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type=self.act_type, norm_type=self.norm_type)
+                              )
         
         ## reg head
         reg_heads = []
@@ -63,21 +62,21 @@ class FcosHead(nn.Module):
         for i in range(self.num_reg_head):
             if i == 0:
                 reg_heads.append(
-                    ConvModule(in_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                               act_type=self.act_type,
-                               norm_type=self.norm_type)
-                               )
+                    BasicConv(in_dim, self.reg_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type=self.act_type, norm_type=self.norm_type)
+                              )
             else:
                 reg_heads.append(
-                    ConvModule(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                               act_type=self.act_type,
-                               norm_type=self.norm_type)
-                               )
+                    BasicConv(self.reg_head_dim, self.reg_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type=self.act_type, norm_type=self.norm_type)
+                              )
         self.cls_heads = nn.Sequential(*cls_heads)
         self.reg_heads = nn.Sequential(*reg_heads)
 
         ## pred layers
-        self.cls_pred = nn.Conv2d(self.cls_head_dim, num_classes, kernel_size=3, padding=1)
+        self.cls_pred = nn.Conv2d(self.cls_head_dim, cfg.num_classes, kernel_size=3, padding=1)
         self.reg_pred = nn.Conv2d(self.reg_head_dim, 4, kernel_size=3, padding=1)
         self.ctn_pred = nn.Conv2d(self.reg_head_dim, 1, kernel_size=3, padding=1)
         
@@ -94,10 +93,12 @@ class FcosHead(nn.Module):
             for layer in module.modules():
                 if isinstance(layer, nn.Conv2d):
                     torch.nn.init.normal_(layer.weight, mean=0, std=0.01)
-                    torch.nn.init.constant_(layer.bias, 0)
+                    if layer.bias is not None:
+                        torch.nn.init.constant_(layer.bias, 0)
                 if isinstance(layer, nn.GroupNorm):
                     torch.nn.init.constant_(layer.weight, 1)
-                    torch.nn.init.constant_(layer.bias, 0)
+                    if layer.bias is not None:
+                        torch.nn.init.constant_(layer.bias, 0)
         # init the bias of cls pred
         init_prob = 0.01
         bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))

+ 25 - 25
odlab/models/head/yolof_head.py

@@ -2,11 +2,11 @@ import math
 import torch
 import torch.nn as nn
 
-from ..basic.conv import ConvModule
+from ..basic.conv import BasicConv
 
 
 class YolofHead(nn.Module):
-    def __init__(self, cfg, in_dim, out_dim, num_classes, num_cls_head=1, num_reg_head=1, act_type='relu', norm_type='BN'):
+    def __init__(self, cfg, in_dim, out_dim):
         super().__init__()
         self.fmp_size = None
         self.ctr_clamp = cfg.center_clamp
@@ -15,12 +15,12 @@ class YolofHead(nn.Module):
         # ------------------ Basic parameters -------------------
         self.cfg = cfg
         self.in_dim = in_dim
-        self.num_classes = num_classes
-        self.num_cls_head=num_cls_head
-        self.num_reg_head=num_reg_head
-        self.act_type=act_type
-        self.norm_type=norm_type
-        self.stride = cfg.out_stride
+        self.stride       = cfg.out_stride
+        self.num_classes  = cfg.num_classes
+        self.num_cls_head = cfg.num_cls_head
+        self.num_reg_head = cfg.num_reg_head
+        self.act_type     = cfg.head_act
+        self.norm_type    = cfg.head_norm
         # Anchor config
         self.anchor_size = torch.as_tensor(cfg.anchor_size)
         self.num_anchors = len(cfg.anchor_size)
@@ -32,38 +32,38 @@ class YolofHead(nn.Module):
         for i in range(self.num_cls_head):
             if i == 0:
                 cls_heads.append(
-                    ConvModule(in_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                               act_type=self.act_type,
-                               norm_type=self.norm_type)
-                               )
+                    BasicConv(in_dim, self.cls_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type=self.act_type, norm_type=self.norm_type)
+                              )
             else:
                 cls_heads.append(
-                    ConvModule(self.cls_head_dim, self.cls_head_dim, k=3, p=1, s=1, 
-                               act_type=self.act_type,
-                               norm_type=self.norm_type)
-                               )
+                    BasicConv(self.cls_head_dim, self.cls_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type=self.act_type, norm_type=self.norm_type)
+                              )
         ## reg head
         reg_heads = []
         self.reg_head_dim = out_dim
         for i in range(self.num_reg_head):
             if i == 0:
                 reg_heads.append(
-                    ConvModule(in_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                               act_type=self.act_type,
-                               norm_type=self.norm_type)
-                               )
+                    BasicConv(in_dim, self.reg_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type=self.act_type, norm_type=self.norm_type)
+                              )
             else:
                 reg_heads.append(
-                    ConvModule(self.reg_head_dim, self.reg_head_dim, k=3, p=1, s=1, 
-                               act_type=self.act_type,
-                               norm_type=self.norm_type)
-                               )
+                    BasicConv(self.reg_head_dim, self.reg_head_dim,
+                              kernel_size=3, padding=1, stride=1, 
+                              act_type=self.act_type, norm_type=self.norm_type)
+                              )
         self.cls_heads = nn.Sequential(*cls_heads)
         self.reg_heads = nn.Sequential(*reg_heads)
 
         # pred layer
         self.obj_pred = nn.Conv2d(self.reg_head_dim, 1 * self.num_anchors, kernel_size=3, padding=1)
-        self.cls_pred = nn.Conv2d(self.cls_head_dim, num_classes * self.num_anchors, kernel_size=3, padding=1)
+        self.cls_pred = nn.Conv2d(self.cls_head_dim, self.num_classes * self.num_anchors, kernel_size=3, padding=1)
         self.reg_pred = nn.Conv2d(self.reg_head_dim, 4 * self.num_anchors, kernel_size=3, padding=1)
 
         # init bias

+ 4 - 23
odlab/models/neck/__init__.py

@@ -1,7 +1,6 @@
 from .dilated_encoder import DilatedEncoder
 from .fpn import BasicFPN
-from .spp import SPPF
-
+from typing import List
 
 # build neck
 def build_neck(cfg, in_dim, out_dim):
@@ -10,30 +9,12 @@ def build_neck(cfg, in_dim, out_dim):
 
     # ----------------------- Neck module -----------------------
     if cfg.neck == 'dilated_encoder':
-        model = DilatedEncoder(in_dim       = in_dim,
-                               out_dim      = out_dim,
-                               expand_ratio = cfg.neck_expand_ratio,
-                               dilations    = cfg.neck_dilations,
-                               act_type     = cfg.neck_act,
-                               norm_type    = cfg.neck_norm,
-                               )
-    elif cfg.neck == 'spp_block':
-        model = SPPF(in_dim       = in_dim,
-                     out_dim      = out_dim,
-                     expand_ratio = cfg.neck_expand_ratio,
-                     pooling_size = cfg.spp_pooling_size,
-                     act_type     = cfg.neck_act,
-                     norm_type    = cfg.neck_norm,
-                     )
+        model = DilatedEncoder(cfg, in_dim, out_dim)
         
     # ----------------------- FPN Neck -----------------------
     elif cfg.neck == 'basic_fpn':
-        model = BasicFPN(in_dims = in_dim,
-                         out_dim = out_dim,
-                         p6_feat = cfg.fpn_p6_feat,
-                         p7_feat = cfg.fpn_p7_feat,
-                         from_c5 = cfg.fpn_p6_from_c5, 
-                         )
+        assert isinstance(in_dim, List)
+        model = BasicFPN(cfg, in_dim, out_dim)
     else:
         raise NotImplementedError("Unknown Neck: <{}>".format(cfg.neck))
         

+ 12 - 12
odlab/models/neck/dilated_encoder.py

@@ -1,7 +1,7 @@
 import torch.nn as nn
 from utils import weight_init
 
-from ..basic.conv import ConvModule
+from ..basic.conv import BasicConv
 
 
 # BottleNeck
@@ -15,9 +15,9 @@ class Bottleneck(nn.Module):
         inter_dim = round(in_dim * expand_ratio)
         # ------------------ Network parameters -------------------
         self.branch = nn.Sequential(
-            ConvModule(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type),
-            ConvModule(inter_dim, inter_dim, k=3, p=dilation, d=dilation, act_type=act_type, norm_type=norm_type),
-            ConvModule(inter_dim, in_dim, k=1, act_type=act_type, norm_type=norm_type)
+            BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type),
+            BasicConv(inter_dim, inter_dim, kernel_size=3, padding=dilation, dilation=dilation, act_type=act_type, norm_type=norm_type),
+            BasicConv(inter_dim, in_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
         )
 
     def forward(self, x):
@@ -25,26 +25,27 @@ class Bottleneck(nn.Module):
 
 # Dilated Encoder
 class DilatedEncoder(nn.Module):
-    def __init__(self, in_dim, out_dim, expand_ratio, dilations=[2, 4, 6, 8], act_type='relu', norm_type='BN'):
+    def __init__(self, cfg, in_dim, out_dim):
         super(DilatedEncoder, self).__init__()
         # ------------------ Basic parameters -------------------
         self.in_dim = in_dim
         self.out_dim = out_dim
-        self.expand_ratio = expand_ratio
-        self.dilations = dilations
+        self.expand_ratio = cfg.neck_expand_ratio
+        self.dilations    = cfg.neck_dilations
+        self.act_type     = cfg.neck_act
+        self.norm_type    = cfg.neck_norm
         # ------------------ Network parameters -------------------
         ## proj layer
         self.projector = nn.Sequential(
-            ConvModule(in_dim, out_dim, k=1, act_type=None, norm_type=norm_type),
-            ConvModule(out_dim, out_dim, k=3, p=1, act_type=None, norm_type=norm_type)
+            BasicConv(in_dim, out_dim, kernel_size=1, act_type=None, norm_type=self.norm_type),
+            BasicConv(out_dim, out_dim, kernel_size=3, padding=1, act_type=None, norm_type=self.norm_type)
         )
         ## encoder layers
         self.encoders = nn.Sequential(
-            *[Bottleneck(out_dim, d, expand_ratio, act_type, norm_type) for d in dilations])
+            *[Bottleneck(out_dim, d, self.expand_ratio, self.act_type, self.norm_type) for d in self.dilations])
 
         self._init_weight()
 
-
     def _init_weight(self):
         for m in self.projector:
             if isinstance(m, nn.Conv2d):
@@ -64,7 +65,6 @@ class DilatedEncoder(nn.Module):
                 nn.init.constant_(m.weight, 1)
                 nn.init.constant_(m.bias, 0)
 
-
     def forward(self, x):
         x = self.projector(x)
         x = self.encoders(x)
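
The refactor does not change the encoder's behavior: a 1x1 + 3x3 projection maps C5 to out_dim, then the dilated bottlenecks keep the spatial size because each 3x3 uses padding == dilation. A quick shape check, assuming it is run from the odlab package root so the imports resolve (the expand ratio here is illustrative):

    import torch
    from types import SimpleNamespace
    from models.neck.dilated_encoder import DilatedEncoder

    cfg = SimpleNamespace(neck_expand_ratio=0.25, neck_dilations=[2, 4, 6, 8],
                          neck_act='relu', neck_norm='BN')
    enc = DilatedEncoder(cfg, in_dim=2048, out_dim=512)
    x = torch.randn(1, 2048, 25, 25)            # C5 for an 800x800 input
    assert enc(x).shape == (1, 512, 25, 25)     # channels projected, stride kept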

+ 7 - 10
odlab/models/neck/fpn.py

@@ -6,18 +6,15 @@ from utils import weight_init
 
 # ------------------ Basic Feature Pyramid Network ------------------
 class BasicFPN(nn.Module):
-    def __init__(self, 
+    def __init__(self, cfg, 
                  in_dims=[512, 1024, 2048],
                  out_dim=256,
-                 p6_feat=False,
-                 p7_feat=False,
-                 from_c5=False,
                  ):
         super().__init__()
         # ------------------ Basic parameters -------------------
-        self.p6_feat = p6_feat
-        self.p7_feat = p7_feat
-        self.from_c5 = from_c5
+        self.p6_feat = cfg.fpn_p6_feat
+        self.p7_feat = cfg.fpn_p7_feat
+        self.from_c5 = cfg.fpn_p6_from_c5
 
         # ------------------ Network parameters -------------------
         ## latter layers
@@ -28,12 +25,12 @@ class BasicFPN(nn.Module):
             self.smooth_layers.append(nn.Conv2d(out_dim, out_dim, kernel_size=3, padding=1))
 
         ## P6/P7 layers
-        if p6_feat:
-            if from_c5:
+        if self.p6_feat:
+            if self.from_c5:
                 self.p6_conv = nn.Conv2d(in_dims[-1], out_dim, kernel_size=3, stride=2, padding=1)
             else: # from p5
                 self.p6_conv = nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=2, padding=1)
-        if p7_feat:
+        if self.p7_feat:
             self.p7_conv = nn.Sequential(
                 nn.ReLU(inplace=True),
                 nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=2, padding=1)
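
For reference, the P6/P7 scheme these flags gate follows RetinaNet: P6 is a stride-2 3x3 conv on P5 (or on C5 when fpn_p6_from_c5 is set), and P7 adds a ReLU plus another stride-2 conv. A standalone illustration in plain torch (not the repo module itself):

    import torch
    import torch.nn as nn

    out_dim = 256
    p5 = torch.randn(1, out_dim, 25, 25)
    p6_conv = nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=2, padding=1)
    p7_conv = nn.Sequential(nn.ReLU(inplace=True),
                            nn.Conv2d(out_dim, out_dim, kernel_size=3, stride=2, padding=1))
    p6 = p6_conv(p5)
    p7 = p7_conv(p6)
    print(p6.shape, p7.shape)   # strides 64 and 128: (1,256,13,13) (1,256,7,7)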

+ 0 - 25
odlab/models/neck/spp.py

@@ -1,25 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ..basic.conv import ConvModule
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code referenced to https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, in_dim, out_dim, expand_ratio=0.5, pooling_size=5, act_type="relu", norm_type="BN"):
-        super().__init__()
-        inter_dim = int(in_dim * expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = ConvModule(in_dim, inter_dim, k=1, act_type=act_type, norm_type=norm_type)
-        self.cv2 = ConvModule(inter_dim * 4, out_dim, k=1, act_type=act_type, norm_type=norm_type)
-        self.m = nn.MaxPool2d(kernel_size=pooling_size, stride=1, padding=pooling_size // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))

+ 15 - 5
odlab/train.py

@@ -85,12 +85,23 @@ def main():
     os.makedirs(path_to_save, exist_ok=True)
 
     # ---------------------------- Build DDP ----------------------------
-    distributed_utils.init_distributed_mode(args)
-    print("git:\n  {}\n".format(distributed_utils.get_sha()))
+    local_rank = local_process_rank = -1
+    if args.distributed:
+        distributed_utils.init_distributed_mode(args)
+        print("git:\n  {}\n".format(distributed_utils.get_sha()))
+        try:
+            # Multiple Machines & Multiple GPUs (world size > 8)
+            local_rank = torch.distributed.get_rank()
+            local_process_rank = int(os.getenv('LOCAL_PROCESS_RANK', '0'))
+        except Exception:
+            # Single Machine & Multiple GPUs (world size <= 8)
+            local_rank = local_process_rank = torch.distributed.get_rank()
     world_size = distributed_utils.get_world_size()
-    print('World size: {}'.format(world_size))
     per_gpu_batch = args.batch_size // world_size
-
+    print("LOCAL RANK: ", local_rank)
+    print("LOCAL_PROCESS_RANL: ", local_process_rank)
+    print('WORLD SIZE: {}'.format(world_size))
+    
     # ---------------------------- Build CUDA ----------------------------
     if args.cuda and torch.cuda.is_available():
         print('use cuda')
@@ -103,7 +114,6 @@ def main():
 
     # ---------------------------- Build config ----------------------------
     cfg = build_config(args)
-    print('Model config: ', cfg)
 
     # ---------------------------- Build Dataset ----------------------------
     transforms = build_transform(cfg, is_train=True)
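
The rank bookkeeping above reads cleanly in isolation: on a single machine the global rank doubles as the local one, while multi-machine launches are expected to export LOCAL_PROCESS_RANK. A condensed standalone sketch (resolve_ranks is hypothetical; the actual process-group setup lives in distributed_utils):

    import os
    import torch.distributed as dist

    def resolve_ranks(distributed: bool):
        # -1 signals the non-distributed, single-GPU case.
        local_rank = local_process_rank = -1
        if distributed and dist.is_available() and dist.is_initialized():
            local_rank = dist.get_rank()
            # Multi-machine launchers export LOCAL_PROCESS_RANK; otherwise
            # fall back to the global rank, as the single-machine branch does.
            local_process_rank = int(os.getenv('LOCAL_PROCESS_RANK', str(local_rank)))
        return local_rank, local_process_rank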

+ 1 - 1
odlab/utils/lr_scheduler.py

@@ -42,7 +42,7 @@ def build_lr_scheduler(cfg, optimizer, resume=None):
     print('LR Scheduler: {}'.format(cfg.lr_scheduler))
 
     if cfg.lr_scheduler == 'step':
-        assert 'lr_epoch' in cfg
+        assert hasattr(cfg, 'lr_epoch')
         print('--lr_epoch: {}'.format(cfg.lr_epoch))
         lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=cfg.lr_epoch)
     elif cfg.lr_scheduler == 'cosine':
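
The hasattr fix is not cosmetic: `'lr_epoch' in cfg` needs __contains__ (or iteration) on cfg, which these plain config classes do not define, so the old assert raised TypeError instead of checking anything. A stand-in demonstration:

    class Cfg:                            # stands in for the config classes
        lr_epoch = [8, 11]

    cfg = Cfg()
    assert hasattr(cfg, 'lr_epoch')       # the new check works
    try:
        'lr_epoch' in cfg
    except TypeError as e:
        print(e)                          # argument of type 'Cfg' is not iterable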

+ 2 - 2
odlab/utils/optimizer.py

@@ -6,7 +6,7 @@ def build_optimizer(cfg, model, resume=None):
     print('==============================')
     print('Optimizer: {}'.format(cfg.optimizer))
     print('--base_lr: {}'.format(cfg.base_lr))
-    print('--backbone_lr_ratio: {}'.format(cfg.backbone_lr_ratio))
+    print('--backbone_lr_ratio: {}'.format(cfg.bk_lr_ratio))
     print('--momentum: {}'.format(cfg.momentum))
     print('--weight_decay: {}'.format(cfg.weight_decay))
 
@@ -14,7 +14,7 @@ def build_optimizer(cfg, model, resume=None):
         {"params": [p for n, p in model.named_parameters() if "backbone" not in n and p.requires_grad]},
         {
             "params": [p for n, p in model.named_parameters() if "backbone" in n and p.requires_grad],
-            "lr": cfg.base_lr * cfg.backbone_lr_ratio,
+            "lr": cfg.base_lr * cfg.bk_lr_ratio,
         },
     ]
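
These two param groups give the backbone a scaled learning rate while every other parameter uses base_lr. A toy demonstration of the pattern (module names are hypothetical; only the grouping logic matters):

    import torch
    import torch.nn as nn

    model = nn.ModuleDict({'backbone': nn.Linear(4, 4), 'head': nn.Linear(4, 4)})
    base_lr, bk_lr_ratio = 0.01, 0.1
    groups = [
        {"params": [p for n, p in model.named_parameters() if "backbone" not in n]},
        {"params": [p for n, p in model.named_parameters() if "backbone" in n],
         "lr": base_lr * bk_lr_ratio},
    ]
    optimizer = torch.optim.SGD(groups, lr=base_lr, momentum=0.9)
    print([g['lr'] for g in optimizer.param_groups])   # [0.01, 0.001]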
 

+ 3 - 2
yolo/models/yolov1/yolov1_basic.py

@@ -47,11 +47,12 @@ class BasicConv(nn.Module):
                 ):
         super(BasicConv, self).__init__()
         self.depthwise = depthwise
+        use_bias = norm_type is None
         if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
             self.norm = get_norm(norm_type, out_dim)
         else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
             self.norm1 = get_norm(norm_type, in_dim)
             self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
             self.norm2 = get_norm(norm_type, out_dim)
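
This is the standard conv/norm saving, and the identical one-line change repeats across the other YOLO variants below: a conv bias that feeds straight into BatchNorm is cancelled exactly, because BatchNorm subtracts the per-channel mean, so keeping it only wastes parameters. A quick check of that claim:

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
    bn = nn.BatchNorm2d(8)

    x = torch.randn(4, 3, 16, 16)
    y1 = bn(conv(x))
    with torch.no_grad():
        conv.bias += 5.0                  # shift every output channel
    y2 = bn(conv(x))
    print(torch.allclose(y1, y2, atol=1e-5))   # True: the bias had no effect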

+ 3 - 2
yolo/models/yolov2/yolov2_basic.py

@@ -47,11 +47,12 @@ class BasicConv(nn.Module):
                 ):
         super(BasicConv, self).__init__()
         self.depthwise = depthwise
+        use_bias = norm_type is None
         if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
             self.norm = get_norm(norm_type, out_dim)
         else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
             self.norm1 = get_norm(norm_type, in_dim)
             self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
             self.norm2 = get_norm(norm_type, out_dim)

+ 3 - 2
yolo/models/yolov3/yolov3_basic.py

@@ -47,11 +47,12 @@ class BasicConv(nn.Module):
                 ):
         super(BasicConv, self).__init__()
         self.depthwise = depthwise
+        use_bias = norm_type is None
         if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
             self.norm = get_norm(norm_type, out_dim)
         else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
             self.norm1 = get_norm(norm_type, in_dim)
             self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
             self.norm2 = get_norm(norm_type, out_dim)

+ 3 - 2
yolo/models/yolov5/yolov5_basic.py

@@ -47,11 +47,12 @@ class BasicConv(nn.Module):
                 ):
         super(BasicConv, self).__init__()
         self.depthwise = depthwise
+        use_bias = norm_type is None
         if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
             self.norm = get_norm(norm_type, out_dim)
         else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
             self.norm1 = get_norm(norm_type, in_dim)
             self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
             self.norm2 = get_norm(norm_type, out_dim)

+ 3 - 2
yolo/models/yolov5_af/yolov5_af_basic.py

@@ -47,11 +47,12 @@ class BasicConv(nn.Module):
                 ):
         super(BasicConv, self).__init__()
         self.depthwise = depthwise
+        use_bias = norm_type is None
         if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
             self.norm = get_norm(norm_type, out_dim)
         else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
             self.norm1 = get_norm(norm_type, in_dim)
             self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
             self.norm2 = get_norm(norm_type, out_dim)

+ 3 - 2
yolo/models/yolov7_af/yolov7_af_basic.py

@@ -47,11 +47,12 @@ class BasicConv(nn.Module):
                 ):
         super(BasicConv, self).__init__()
         self.depthwise = depthwise
+        use_bias = norm_type is None
         if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
             self.norm = get_norm(norm_type, out_dim)
         else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
             self.norm1 = get_norm(norm_type, in_dim)
             self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
             self.norm2 = get_norm(norm_type, out_dim)

+ 3 - 2
yolo/models/yolov8/yolov8_basic.py

@@ -47,11 +47,12 @@ class BasicConv(nn.Module):
                 ):
         super(BasicConv, self).__init__()
         self.depthwise = depthwise
+        use_bias = norm_type is None
         if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
+            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
             self.norm = get_norm(norm_type, out_dim)
         else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
+            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
             self.norm1 = get_norm(norm_type, in_dim)
             self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
             self.norm2 = get_norm(norm_type, out_dim)