
train YOLOv8-S with 300 epochs

yjh0410 1 year ago
parent
commit
5bcd4bc7e5

+ 1 - 1
yolo/config/yolov8_config.py

@@ -88,7 +88,7 @@ class Yolov8BaseConfig(object):
         # ---------------- Lr Scheduler config ----------------
         self.warmup_epoch = 3
         self.lr_scheduler = "cosine"
-        self.max_epoch    = 500
+        self.max_epoch    = 300
         self.eval_epoch   = 10
         self.no_aug_epoch = 20
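
For reference, the schedule these settings describe can be sketched as below: a linear warmup over `warmup_epoch` epochs followed by cosine decay until `max_epoch`. This is a minimal illustration assuming per-epoch stepping; `base_lr` and `min_lr_ratio` are illustrative values, and the repository's actual scheduler may differ in details.

```Python
import math

def lr_at_epoch(epoch, base_lr=0.01, warmup_epoch=3, max_epoch=300, min_lr_ratio=0.05):
    # base_lr and min_lr_ratio are illustrative, not taken from the repo.
    if epoch < warmup_epoch:
        # Linear warmup from ~0 up to base_lr.
        return base_lr * (epoch + 1) / warmup_epoch
    # Cosine decay from base_lr down to base_lr * min_lr_ratio at max_epoch.
    progress = (epoch - warmup_epoch) / (max_epoch - warmup_epoch)
    cosine = 0.5 * (1.0 + math.cos(math.pi * progress))
    return base_lr * (min_lr_ratio + (1.0 - min_lr_ratio) * cosine)

for e in (0, 2, 3, 150, 299):
    print(e, round(lr_at_epoch(e), 5))
```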
 

+ 1 - 1
yolo/models/rtdetr/basic_modules/conv.py

@@ -84,7 +84,7 @@ class BasicConv(nn.Module):
             # Depthwise conv
             x = self.norm1(self.conv1(x))
             # Pointwise conv
-            x = self.norm2(self.conv2(x))
+            x = self.act(self.norm2(self.conv2(x)))
             return x
 
 class Bottleneck(nn.Module):
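
The change above adds the missing nonlinearity after the pointwise convolution in the depthwise branch of `BasicConv`; previously that branch returned a purely linear conv-norm output. A self-contained sketch of the corrected ordering, assuming BatchNorm and SiLU as used elsewhere in the repo:

```Python
import torch
import torch.nn as nn

class DWSepConv(nn.Module):
    """Depthwise-separable conv with the corrected norm -> act ordering."""
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.conv1 = nn.Conv2d(in_dim, in_dim, 3, padding=1, groups=in_dim, bias=False)  # depthwise
        self.norm1 = nn.BatchNorm2d(in_dim)
        self.conv2 = nn.Conv2d(in_dim, out_dim, 1, bias=False)  # pointwise
        self.norm2 = nn.BatchNorm2d(out_dim)
        self.act = nn.SiLU(inplace=True)

    def forward(self, x):
        x = self.norm1(self.conv1(x))                # depthwise conv (no activation here, as in BasicConv)
        return self.act(self.norm2(self.conv2(x)))   # activation now follows the pointwise conv

print(DWSepConv(8, 16)(torch.randn(1, 8, 32, 32)).shape)  # torch.Size([1, 16, 32, 32])
```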

+ 0 - 61
yolo/models/yolov7_af/README.md

@@ -1,61 +0,0 @@
-# Anchor-free YOLOv7:
-
-- VOC
-
-|     Model   | Batch | Scale | AP<sup>val<br>0.5 | Weight |  Logs  |
-|-------------|-------|-------|-------------------|--------|--------|
-| YOLOv7-AF-T | 1xb16 |  640  |       80.6        | [ckpt](https://github.com/yjh0410/YOLO-Tutorial-v7/releases/download/yolo_tutorial_ckpt/yolov7_af_t_voc.pth) | [log](https://github.com/yjh0410/YOLO-Tutorial-v7/releases/download/yolo_tutorial_ckpt/YOLOv7-AF-T-VOC.txt) |
-
-- COCO
-
-|    Model    | Batch | Scale | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |  Logs  |
-|-------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|--------|
-| YOLOv7-AF-T | 1xb16 |  640  |                    |               |   26.9            |   8.9             |  |  |
-
-- For training, we train the redesigned YOLOv7-AF for 500 epochs on COCO, using gradient accumulation.
-- For data augmentation, we use RandomAffine, RandomHSV, Mosaic, and YOLOX-style Mixup.
-- For the optimizer, we use AdamW with a weight decay of 0.05 and a per-image base lr of 0.001 / 64.
-- For the learning rate scheduler, we use a cosine decay scheduler.
-- For the batch size, we set it to 16 and use gradient accumulation to approximate a batch size of 256 (a sketch follows below).
-
-
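
A minimal sketch of the gradient-accumulation trick mentioned above, with a toy model standing in for the detector; the lr follows the stated per-image base lr of 0.001 / 64 scaled by the micro-batch size of 16:

```Python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Toy model/data; 16 micro-batches of size 16 approximate one update at batch 256.
model = nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(),
                              lr=0.001 / 64 * 16,   # per-image base lr scaled by micro-batch size
                              weight_decay=0.05)
accum_steps = 256 // 16

optimizer.zero_grad()
for step in range(accum_steps * 2):                  # two effective updates
    x, y = torch.randn(16, 4), torch.randn(16, 1)    # stand-in micro-batch
    loss = F.mse_loss(model(x), y)
    (loss / accum_steps).backward()                  # scale so summed grads match one big batch
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()
```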
-## Train YOLOv7-AF
-### Single GPU
-Taking training YOLOv7-AF-S on COCO as the example,
-```Shell
-python train.py --cuda -d coco --root path/to/coco -m yolov7_af_s -bs 16 --fp16 
-```
-
-### Multi GPU
-Taking training YOLOv7-AF-S on COCO as the example,
-```Shell
-python -m torch.distributed.run --nproc_per_node=8 train.py --cuda --distributed -d coco --root path/to/coco -m yolov7_af_s -bs 16 --fp16 
-```
-
-## Test YOLOv7-AF
-Taking testing YOLOv7-AF-S on COCO-val as the example,
-```Shell
-python test.py --cuda -d coco --root path/to/coco -m yolov7_af_s --weight path/to/yolov7.pth --show 
-```
-
-## Evaluate YOLOv7-AF
-Taking evaluating YOLOv7-AF-S on COCO-val as the example,
-```Shell
-python eval.py --cuda -d coco --root path/to/coco -m yolov7_af_s --weight path/to/yolov7.pth 
-```
-
-## Demo
-### Detect with Image
-```Shell
-python demo.py --mode image --path_to_img path/to/image_dirs/ --cuda -m yolov7_af_s --weight path/to/weight --show
-```
-
-### Detect with Video
-```Shell
-python demo.py --mode video --path_to_vid path/to/video --cuda -m yolov7_af_s --weight path/to/weight --show --gif
-```
-
-### Detect with Camera
-```Shell
-python demo.py --mode camera --cuda -m yolov7_af_s --weight path/to/weight --show --gif
-```

+ 0 - 24
yolo/models/yolov7_af/build.py

@@ -1,24 +0,0 @@
-import torch.nn as nn
-
-from .loss import SetCriterion
-from .yolov7_af import Yolov7AF
-
-
-# build object detector
-def build_yolov7af(cfg, is_val=False):
-    # -------------- Build YOLO --------------
-    model = Yolov7AF(cfg, is_val)
-
-    # -------------- Initialize YOLO --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if is_val:
-        # build criterion for training
-        criterion = SetCriterion(cfg)
-        
-    return model, criterion
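
The BatchNorm override in `build_yolov7af` is easy to verify on a toy module; a quick sketch (the `nn.Sequential` stand-in is hypothetical, not the real model):

```Python
import torch.nn as nn

# Hypothetical stand-in module, just to show the override applied in build_yolov7af.
toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
for m in toy.modules():
    if isinstance(m, nn.BatchNorm2d):
        m.eps = 1e-3
        m.momentum = 0.03

print(toy[1].eps, toy[1].momentum)  # 0.001 0.03 (PyTorch defaults: 1e-5, 0.1)
```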

+ 0 - 141
yolo/models/yolov7_af/loss.py

@@ -1,141 +0,0 @@
-import torch
-import torch.nn.functional as F
-from utils.box_ops import get_ious
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-from .matcher import YoloxMatcher
-
-
-class SetCriterion(object):
-    def __init__(self, cfg):
-        self.cfg = cfg
-        self.num_classes = cfg.num_classes
-        self.loss_obj_weight = cfg.loss_obj
-        self.loss_cls_weight = cfg.loss_cls
-        self.loss_box_weight = cfg.loss_box
-        # matcher
-        self.matcher = YoloxMatcher(cfg.num_classes, cfg.ota_center_sampling_radius, cfg.ota_topk_candidate)
-
-    def loss_objectness(self, pred_obj, gt_obj):
-        loss_obj = F.binary_cross_entropy_with_logits(pred_obj, gt_obj, reduction='none')
-
-        return loss_obj
-    
-    def loss_classes(self, pred_cls, gt_label):
-        loss_cls = F.binary_cross_entropy_with_logits(pred_cls, gt_label, reduction='none')
-
-        return loss_cls
-
-    def loss_bboxes(self, pred_box, gt_box):
-        # regression loss
-        ious = get_ious(pred_box, gt_box, "xyxy", 'giou')
-        loss_box = 1.0 - ious
-
-        return loss_box
-
-    def __call__(self, outputs, targets):        
-        """
-            outputs['pred_obj']: List(Tensor) [B, M, 1]
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_reg']: List(Tensor) [B, M, 4]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs = outputs['pred_cls'][0].shape[0]
-        device = outputs['pred_cls'][0].device
-        fpn_strides = outputs['strides']
-        anchors = outputs['anchors']
-        # preds: [B, M, C]
-        obj_preds = torch.cat(outputs['pred_obj'], dim=1)
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-
-        # label assignment
-        cls_targets = []
-        box_targets = []
-        obj_targets = []
-        fg_masks = []
-
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)
-            tgt_bboxes = targets[batch_idx]["boxes"].to(device)
-
-            # check target
-            if len(tgt_labels) == 0 or tgt_bboxes.max().item() == 0.:
-                num_anchors = sum([ab.shape[0] for ab in anchors])
-                # There is no valid gt
-                cls_target = obj_preds.new_zeros((0, self.num_classes))
-                box_target = obj_preds.new_zeros((0, 4))
-                obj_target = obj_preds.new_zeros((num_anchors, 1))
-                fg_mask = obj_preds.new_zeros(num_anchors).bool()
-            else:
-                (
-                    fg_mask,
-                    assigned_labels,
-                    assigned_ious,
-                    assigned_indexs
-                ) = self.matcher(
-                    fpn_strides = fpn_strides,
-                    anchors = anchors,
-                    pred_obj = obj_preds[batch_idx],
-                    pred_cls = cls_preds[batch_idx], 
-                    pred_box = box_preds[batch_idx],
-                    tgt_labels = tgt_labels,
-                    tgt_bboxes = tgt_bboxes
-                    )
-
-                obj_target = fg_mask.unsqueeze(-1)
-                cls_target = F.one_hot(assigned_labels.long(), self.num_classes)
-                cls_target = cls_target * assigned_ious.unsqueeze(-1)
-                box_target = tgt_bboxes[assigned_indexs]
-
-            cls_targets.append(cls_target)
-            box_targets.append(box_target)
-            obj_targets.append(obj_target)
-            fg_masks.append(fg_mask)
-
-        cls_targets = torch.cat(cls_targets, 0)
-        box_targets = torch.cat(box_targets, 0)
-        obj_targets = torch.cat(obj_targets, 0)
-        fg_masks = torch.cat(fg_masks, 0)
-        num_fgs = fg_masks.sum()
-
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0)
-
-        # ------------------ Objectness loss ------------------
-        loss_obj = self.loss_objectness(obj_preds.view(-1, 1), obj_targets.float())
-        loss_obj = loss_obj.sum() / num_fgs
-        
-        # ------------------ Classification loss ------------------
-        cls_preds_pos = cls_preds.view(-1, self.num_classes)[fg_masks]
-        loss_cls = self.loss_classes(cls_preds_pos, cls_targets)
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ Regression loss ------------------
-        box_preds_pos = box_preds.view(-1, 4)[fg_masks]
-        loss_box = self.loss_bboxes(box_preds_pos, box_targets)
-        loss_box = loss_box.sum() / num_fgs
-
-        # total loss
-        losses = self.loss_obj_weight * loss_obj + \
-                 self.loss_cls_weight * loss_cls + \
-                 self.loss_box_weight * loss_box
-
-        # Loss dict
-        loss_dict = dict(
-                loss_obj = loss_obj,
-                loss_cls = loss_cls,
-                loss_box = loss_box,
-                losses = losses
-        )
-
-        return loss_dict
-
-
-if __name__ == "__main__":
-    pass
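
The classification target built above is a soft label: a one-hot class vector scaled by the matched prediction's IoU. A tiny numeric sketch with made-up labels and IoUs:

```Python
import torch
import torch.nn.functional as F

num_classes = 4
assigned_labels = torch.tensor([2, 0])         # hypothetical matched classes
assigned_ious   = torch.tensor([0.83, 0.55])   # hypothetical IoUs of the matched predictions

# One-hot class vectors scaled by IoU, as in SetCriterion.__call__
cls_target = F.one_hot(assigned_labels, num_classes).float() * assigned_ious.unsqueeze(-1)
print(cls_target)
# tensor([[0.0000, 0.0000, 0.8300, 0.0000],
#         [0.5500, 0.0000, 0.0000, 0.0000]])
```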

+ 0 - 185
yolo/models/yolov7_af/matcher.py

@@ -1,185 +0,0 @@
-# ---------------------------------------------------------------------
-# Copyright (c) Megvii Inc. All rights reserved.
-# ---------------------------------------------------------------------
-
-
-import torch
-import torch.nn.functional as F
-from utils.box_ops import *
-
-
-class YoloxMatcher(object):
-    """
-        This code is adapted from https://github.com/Megvii-BaseDetection/YOLOX/blob/main/yolox/models/yolo_head.py
-    """
-    def __init__(self, num_classes, center_sampling_radius, topk_candidate ):
-        self.num_classes = num_classes
-        self.center_sampling_radius = center_sampling_radius
-        self.topk_candidate = topk_candidate
-
-
-    @torch.no_grad()
-    def __call__(self, 
-                 fpn_strides, 
-                 anchors, 
-                 pred_obj, 
-                 pred_cls, 
-                 pred_box, 
-                 tgt_labels,
-                 tgt_bboxes):
-        # [M,]
-        strides_tensor = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
-                                for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
-        # List[F, M, 2] -> [M, 2]
-        anchors = torch.cat(anchors, dim=0)
-        num_anchor = anchors.shape[0]        
-        num_gt = len(tgt_labels)
-
-        # ----------------------- Find inside points -----------------------
-        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
-            tgt_bboxes, anchors, strides_tensor, num_anchor, num_gt)
-        obj_preds = pred_obj[fg_mask].float()   # [Mp, 1]
-        cls_preds = pred_cls[fg_mask].float()   # [Mp, C]
-        box_preds = pred_box[fg_mask].float()   # [Mp, 4]
-
-        # ----------------------- Reg cost -----------------------
-        pair_wise_ious, _ = box_iou(tgt_bboxes, box_preds)      # [N, Mp]
-        reg_cost = -torch.log(pair_wise_ious + 1e-8)            # [N, Mp]
-
-        # ----------------------- Cls cost -----------------------
-        with torch.cuda.amp.autocast(enabled=False):
-            # [Mp, C]
-            score_preds = torch.sqrt(obj_preds.sigmoid_() * cls_preds.sigmoid_())
-            # [N, Mp, C]
-            score_preds = score_preds.unsqueeze(0).repeat(num_gt, 1, 1)
-            # prepare cls_target
-            cls_targets = F.one_hot(tgt_labels.long(), self.num_classes).float()
-            cls_targets = cls_targets.unsqueeze(1).repeat(1, score_preds.size(1), 1)
-            # [N, Mp]
-            cls_cost = F.binary_cross_entropy(score_preds, cls_targets, reduction="none").sum(-1)
-        del score_preds
-
-        #----------------------- Dynamic K-Matching -----------------------
-        cost_matrix = (
-            cls_cost
-            + 3.0 * reg_cost
-            + 100000.0 * (~is_in_boxes_and_center)
-        ) # [N, Mp]
-
-        (
-            assigned_labels,         # [num_fg,]
-            assigned_ious,           # [num_fg,]
-            assigned_indexs,         # [num_fg,]
-        ) = self.dynamic_k_matching(
-            cost_matrix,
-            pair_wise_ious,
-            tgt_labels,
-            num_gt,
-            fg_mask
-            )
-        del cls_cost, cost_matrix, pair_wise_ious, reg_cost
-
-        return fg_mask, assigned_labels, assigned_ious, assigned_indexs
-
-    def get_in_boxes_info(
-        self,
-        gt_bboxes,   # [N, 4]
-        anchors,     # [M, 2]
-        strides,     # [M,]
-        num_anchors, # M
-        num_gt,      # N
-        ):
-        # anchor center
-        x_centers = anchors[:, 0]
-        y_centers = anchors[:, 1]
-
-        # [M,] -> [1, M] -> [N, M]
-        x_centers = x_centers.unsqueeze(0).repeat(num_gt, 1)
-        y_centers = y_centers.unsqueeze(0).repeat(num_gt, 1)
-
-        # [N,] -> [N, 1] -> [N, M]
-        gt_bboxes_l = gt_bboxes[:, 0].unsqueeze(1).repeat(1, num_anchors) # x1
-        gt_bboxes_t = gt_bboxes[:, 1].unsqueeze(1).repeat(1, num_anchors) # y1
-        gt_bboxes_r = gt_bboxes[:, 2].unsqueeze(1).repeat(1, num_anchors) # x2
-        gt_bboxes_b = gt_bboxes[:, 3].unsqueeze(1).repeat(1, num_anchors) # y2
-
-        b_l = x_centers - gt_bboxes_l
-        b_r = gt_bboxes_r - x_centers
-        b_t = y_centers - gt_bboxes_t
-        b_b = gt_bboxes_b - y_centers
-        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
-
-        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
-        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
-        # in fixed center
-        center_radius = self.center_sampling_radius
-
-        # [N, 2]
-        gt_centers = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) * 0.5
-        
-        # [1, M]
-        center_radius_ = center_radius * strides.unsqueeze(0)
-
-        gt_bboxes_l = gt_centers[:, 0].unsqueeze(1).repeat(1, num_anchors) - center_radius_ # x1
-        gt_bboxes_t = gt_centers[:, 1].unsqueeze(1).repeat(1, num_anchors) - center_radius_ # y1
-        gt_bboxes_r = gt_centers[:, 0].unsqueeze(1).repeat(1, num_anchors) + center_radius_ # x2
-        gt_bboxes_b = gt_centers[:, 1].unsqueeze(1).repeat(1, num_anchors) + center_radius_ # y2
-
-        c_l = x_centers - gt_bboxes_l
-        c_r = gt_bboxes_r - x_centers
-        c_t = y_centers - gt_bboxes_t
-        c_b = gt_bboxes_b - y_centers
-        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
-        is_in_centers = center_deltas.min(dim=-1).values > 0.0
-        is_in_centers_all = is_in_centers.sum(dim=0) > 0
-
-        # in boxes and in centers
-        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
-
-        is_in_boxes_and_center = (
-            is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]
-        )
-        return is_in_boxes_anchor, is_in_boxes_and_center
-
-    def dynamic_k_matching(
-        self, 
-        cost, 
-        pair_wise_ious, 
-        gt_classes, 
-        num_gt, 
-        fg_mask
-        ):
-        # Dynamic K
-        # ---------------------------------------------------------------
-        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
-
-        ious_in_boxes_matrix = pair_wise_ious
-        n_candidate_k = min(self.topk_candidate, ious_in_boxes_matrix.size(1))
-        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
-        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
-        dynamic_ks = dynamic_ks.tolist()
-        for gt_idx in range(num_gt):
-            _, pos_idx = torch.topk(
-                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False
-            )
-            matching_matrix[gt_idx][pos_idx] = 1
-
-        del topk_ious, dynamic_ks, pos_idx
-
-        anchor_matching_gt = matching_matrix.sum(0)
-        if (anchor_matching_gt > 1).sum() > 0:
-            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
-            matching_matrix[:, anchor_matching_gt > 1] *= 0
-            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
-        fg_mask_inboxes = matching_matrix.sum(0) > 0
-
-        fg_mask[fg_mask.clone()] = fg_mask_inboxes
-
-        assigned_indexs = matching_matrix[:, fg_mask_inboxes].argmax(0)
-        assigned_labels = gt_classes[assigned_indexs]
-
-        assigned_ious = (matching_matrix * pair_wise_ious).sum(0)[
-            fg_mask_inboxes
-        ]
-        return assigned_labels, assigned_ious, assigned_indexs
-    
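
The `dynamic_k_matching` step derives each GT's k from the sum of its top candidate IoUs (truncated to int and clamped to at least 1). A small numeric sketch with made-up IoUs:

```Python
import torch

# Made-up GT-anchor IoUs for two GTs over five candidate anchors.
pair_wise_ious = torch.tensor([
    [0.90, 0.80, 0.50, 0.10, 0.05],   # GT 0: IoU sum 2.35 -> k = 2
    [0.08, 0.05, 0.03, 0.02, 0.01],   # GT 1: IoU sum 0.19 -> int() gives 0, clamped to 1
])
topk_candidate = 10
n_candidate_k = min(topk_candidate, pair_wise_ious.size(1))
topk_ious, _ = torch.topk(pair_wise_ious, n_candidate_k, dim=1)
dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
print(dynamic_ks.tolist())  # [2, 1]
```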

+ 0 - 158
yolo/models/yolov7_af/yolov7_af.py

@@ -1,158 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-from .yolov7_af_backbone import Yolov7TBackbone, Yolov7LBackbone
-from .yolov7_af_neck     import SPPFBlockCSP
-from .yolov7_af_pafpn    import Yolov7PaFPN
-from .yolov7_af_head     import Yolov7DetHead
-from .yolov7_af_pred     import Yolov7AFDetPredLayer
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# Yolov7AF
-class Yolov7AF(nn.Module):
-    def __init__(self,
-                 cfg,
-                 is_val = False,
-                 ) -> None:
-        super(Yolov7AF, self).__init__()
-        # ---------------------- Basic setting ----------------------
-        assert cfg.scale in ["t", "l", "x"]
-        self.cfg = cfg
-        self.num_classes = cfg.num_classes
-        ## Post-process parameters
-        self.topk_candidates  = cfg.val_topk        if is_val else cfg.test_topk
-        self.conf_thresh      = cfg.val_conf_thresh if is_val else cfg.test_conf_thresh
-        self.nms_thresh       = cfg.val_nms_thresh  if is_val else cfg.test_nms_thresh
-        self.no_multi_labels  = False if is_val else True
-        
-        # ---------------------- Network Parameters ----------------------
-        ## Backbone
-        self.backbone = Yolov7TBackbone(cfg) if cfg.scale == "t" else Yolov7LBackbone(cfg)
-        self.pyramid_feat_dims = self.backbone.feat_dims[-3:]
-        ## Neck: SPP
-        self.neck     = SPPFBlockCSP(cfg, self.pyramid_feat_dims[-1], self.pyramid_feat_dims[-1]//2)
-        self.pyramid_feat_dims[-1] = self.neck.out_dim
-        ## Neck: FPN
-        self.fpn      = Yolov7PaFPN(cfg, self.pyramid_feat_dims)
-        ## Head
-        self.head     = Yolov7DetHead(cfg, self.fpn.out_dims)
-        ## Pred
-        self.pred     = Yolov7AFDetPredLayer(cfg)
-
-    def post_process(self, obj_preds, cls_preds, box_preds):
-        """
-        We process predictions at each scale hierarchically
-        Input:
-            obj_preds: List[torch.Tensor] -> [[B, M, 1], ...], B=1
-            cls_preds: List[torch.Tensor] -> [[B, M, C], ...], B=1
-            box_preds: List[torch.Tensor] -> [[B, M, 4], ...], B=1
-        Output:
-            bboxes: np.array -> [N, 4]
-            scores: np.array -> [N,]
-            labels: np.array -> [N,]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for obj_pred_i, cls_pred_i, box_pred_i in zip(obj_preds, cls_preds, box_preds):
-            obj_pred_i = obj_pred_i[0]
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            if self.no_multi_labels:
-                # [M,]
-                scores, labels = torch.max(
-                    torch.sqrt(obj_pred_i.sigmoid() * cls_pred_i.sigmoid()), dim=1)
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # topk candidates
-                predicted_prob, topk_idxs = scores.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                labels = labels[topk_idxs]
-                bboxes = box_pred_i[topk_idxs]
-            else:
-                # [M, C] -> [MC,]
-                scores_i = torch.sqrt(obj_pred_i.sigmoid() * cls_pred_i.sigmoid()).flatten()
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # torch.sort is actually faster than .topk (at least on GPUs)
-                predicted_prob, topk_idxs = scores_i.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-                labels = topk_idxs % self.num_classes
-
-                bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores, dim=0)
-        labels = torch.cat(all_labels, dim=0)
-        bboxes = torch.cat(all_bboxes, dim=0)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes)
-        
-        return bboxes, scores, labels
-    
-    def forward(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-        
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Heads ----------------
-        cls_feats, reg_feats = self.head(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.pred(cls_feats, reg_feats)
-        outputs['image_size'] = [x.shape[2], x.shape[3]]
-
-        if not self.training:
-            all_obj_preds = outputs['pred_obj']
-            all_cls_preds = outputs['pred_cls']
-            all_box_preds = outputs['pred_box']
-
-            # post process
-            bboxes, scores, labels = self.post_process(all_obj_preds, all_cls_preds, all_box_preds)
-            outputs = {
-                "scores": scores,
-                "labels": labels,
-                "bboxes": bboxes
-            }
-        
-        return outputs 
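
In the multi-label branch of `post_process`, the `[M, C]` score map is flattened before top-k, and the kept indices are decoded back into (anchor, class) pairs. A tiny sketch with hypothetical scores:

```Python
import torch

num_classes = 3
scores = torch.tensor([[0.1, 0.9, 0.2],    # anchor 0
                       [0.8, 0.1, 0.3]])   # anchor 1
flat = scores.flatten()                    # [M * C] = [6]
topk_scores, topk_idxs = flat.sort(descending=True)
topk_idxs = topk_idxs[:2]                  # keep the top 2 candidates

anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')
labels = topk_idxs % num_classes
print(anchor_idxs.tolist(), labels.tolist())  # [0, 1] [1, 0]
```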

+ 0 - 243
yolo/models/yolov7_af/yolov7_af_backbone.py

@@ -1,243 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .yolov7_af_basic import BasicConv, MDown, ELANLayer
-except:
-    from  yolov7_af_basic import BasicConv, MDown, ELANLayer
-
-# IN1K pretrained weight
-pretrained_urls = {
-    't': "https://github.com/yjh0410/ICLab/releases/download/in1k_pretrained/elannet_t_in1k_63.2.pth",
-    'l': None,
-    'x': None,
-}
-
-# ELANNet-Tiny
-class Yolov7TBackbone(nn.Module):
-    def __init__(self, cfg):
-        super(Yolov7TBackbone, self).__init__()
-        # ---------------- Basic parameters ----------------
-        self.model_scale = cfg.scale
-        self.bk_act = cfg.bk_act
-        self.bk_norm = cfg.bk_norm
-        self.bk_depthwise = cfg.bk_depthwise
-        self.elan_depth = 1
-        self.feat_dims = [round(64  * cfg.width), round(128  * cfg.width),
-                          round(256  * cfg.width), round(512 * cfg.width), round(1024 * cfg.width)]
-
-        # ---------------- Model parameters ----------------
-        self.layer_1 = self.make_stem(3, self.feat_dims[0])
-        self.layer_2 = self.make_block(self.feat_dims[0], self.feat_dims[1], expansion=0.5, downsample="conv")
-        self.layer_3 = self.make_block(self.feat_dims[1], self.feat_dims[2], expansion=0.5, downsample="maxpool")
-        self.layer_4 = self.make_block(self.feat_dims[2], self.feat_dims[3], expansion=0.5, downsample="maxpool")
-        self.layer_5 = self.make_block(self.feat_dims[3], self.feat_dims[4], expansion=0.5, downsample="maxpool")
-
-        # Initialize all layers
-        self.init_weights()
-        
-        # Load imagenet pretrained weight
-        if cfg.use_pretrained:
-            self.load_pretrained()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def load_pretrained(self):
-        url = pretrained_urls[self.model_scale]
-        if url is not None:
-            print('Loading backbone pretrained weight from : {}'.format(url))
-            # checkpoint state dict
-            checkpoint = torch.hub.load_state_dict_from_url(
-                url=url, map_location="cpu", check_hash=True)
-            checkpoint_state_dict = checkpoint.pop("model")
-            # model state dict
-            model_state_dict = self.state_dict()
-            # check
-            for k in list(checkpoint_state_dict.keys()):
-                if k in model_state_dict:
-                    shape_model = tuple(model_state_dict[k].shape)
-                    shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                    if shape_model != shape_checkpoint:
-                        checkpoint_state_dict.pop(k)
-                else:
-                    checkpoint_state_dict.pop(k)
-                    print('Unused key: ', k)
-            # load the weight
-            self.load_state_dict(checkpoint_state_dict)
-        else:
-            print('No pretrained weight for model scale: {}.'.format(self.model_scale))
-
-    def make_stem(self, in_dim, out_dim):
-        stem = BasicConv(in_dim, out_dim, kernel_size=6, padding=2, stride=2,
-                         act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise)
-        
-        return stem
-
-    def make_block(self, in_dim, out_dim, expansion=0.5, downsample="maxpool"):
-        if downsample == "maxpool":
-            block = nn.Sequential(
-                nn.MaxPool2d((2, 2), stride=2),             
-                ELANLayer(in_dim, out_dim, expansion=expansion, num_blocks=self.elan_depth,
-                          act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),
-                          )
-        elif downsample == "conv":
-            block = nn.Sequential(
-                BasicConv(in_dim, out_dim, kernel_size=3, padding=1, stride=2,
-                          act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),             
-                ELANLayer(out_dim, out_dim, expansion=expansion, num_blocks=self.elan_depth,
-                          act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),
-                          )
-        else:
-            raise NotImplementedError("Unknown downsample type: {}".format(downsample))
-
-        return block
-    
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-# ELANNet-Large
-class Yolov7LBackbone(nn.Module):
-    def __init__(self, cfg):
-        super(Yolov7LBackbone, self).__init__()
-        # ---------------- Basic parameters ----------------
-        self.model_scale = cfg.scale
-        self.bk_act = cfg.bk_act
-        self.bk_norm = cfg.bk_norm
-        self.bk_depthwise = cfg.bk_depthwise
-        self.elan_depth = 2
-        self.feat_dims = [round(64  * cfg.width), round(128  * cfg.width), round(256  * cfg.width),
-                          round(512  * cfg.width), round(1024 * cfg.width), round(1024 * cfg.width)]
-
-        # ---------------- Model parameters ----------------
-        self.layer_1 = self.make_stem(3, self.feat_dims[0])
-        self.layer_2 = self.make_block(self.feat_dims[0], self.feat_dims[1], self.feat_dims[2], expansion=0.5, conv_downsample=True)
-        self.layer_3 = self.make_block(self.feat_dims[2], self.feat_dims[2], self.feat_dims[3], expansion=0.5)
-        self.layer_4 = self.make_block(self.feat_dims[3], self.feat_dims[3], self.feat_dims[4], expansion=0.5)
-        self.layer_5 = self.make_block(self.feat_dims[4], self.feat_dims[4], self.feat_dims[5], expansion=0.25)
-
-        # Initialize all layers
-        self.init_weights()
-        
-        # Load imagenet pretrained weight
-        if cfg.use_pretrained:
-            self.load_pretrained()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def load_pretrained(self):
-        url = pretrained_urls[self.model_scale]
-        if url is not None:
-            print('Loading backbone pretrained weight from : {}'.format(url))
-            # checkpoint state dict
-            checkpoint = torch.hub.load_state_dict_from_url(
-                url=url, map_location="cpu", check_hash=True)
-            checkpoint_state_dict = checkpoint.pop("model")
-            # model state dict
-            model_state_dict = self.state_dict()
-            # check
-            for k in list(checkpoint_state_dict.keys()):
-                if k in model_state_dict:
-                    shape_model = tuple(model_state_dict[k].shape)
-                    shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                    if shape_model != shape_checkpoint:
-                        checkpoint_state_dict.pop(k)
-                else:
-                    checkpoint_state_dict.pop(k)
-                    print('Unused key: ', k)
-            # load the weight
-            self.load_state_dict(checkpoint_state_dict)
-        else:
-            print('No pretrained weight for model scale: {}.'.format(self.model_scale))
-
-    def make_stem(self, in_dim, out_dim):
-        stem = nn.Sequential(
-            BasicConv(in_dim, out_dim//2, kernel_size=3, padding=1, stride=1,
-                        act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),
-            BasicConv(out_dim//2, out_dim, kernel_size=3, padding=1, stride=2,
-                        act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),
-            BasicConv(out_dim, out_dim, kernel_size=3, padding=1, stride=1,
-                        act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise)
-
-        )
-
-        return stem
-
-    def make_block(self, in_dim, out_dim_1, out_dim_2, expansion=0.5, conv_downsample=False):
-        if conv_downsample:
-            block = nn.Sequential(
-                BasicConv(in_dim, out_dim_1, kernel_size=3, padding=1, stride=2,
-                         act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),             
-                ELANLayer(out_dim_1, out_dim_2,
-                        expansion=expansion, num_blocks=self.elan_depth,
-                        act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),
-        )
-        else:
-            block = nn.Sequential(
-                MDown(in_dim, out_dim_1,
-                    act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),             
-                ELANLayer(out_dim_1, out_dim_2,
-                        expansion=expansion, num_blocks=self.elan_depth,
-                        act_type=self.bk_act, norm_type=self.bk_norm, depthwise=self.bk_depthwise),
-            )
-        
-        return block
-    
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    class BaseConfig(object):
-        def __init__(self) -> None:
-            self.bk_act = 'silu'
-            self.bk_norm = 'BN'
-            self.bk_depthwise = False
-            self.use_pretrained = False
-            self.width = 0.5
-            self.scale = "t"
-
-    cfg = BaseConfig()
-    model = Yolov7TBackbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    x = torch.randn(1, 3, 640, 640)
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

+ 0 - 185
yolo/models/yolov7_af/yolov7_af_basic.py

@@ -1,185 +0,0 @@
-import torch
-import torch.nn as nn
-from typing import List
-
-
-# --------------------- Basic modules ---------------------
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-    elif norm_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-class BasicConv(nn.Module):
-    def __init__(self, 
-                 in_dim,                   # in channels
-                 out_dim,                  # out channels 
-                 kernel_size=1,            # kernel size 
-                 padding=0,                # padding
-                 stride=1,                 # stride
-                 dilation=1,               # dilation
-                 act_type  :str = 'lrelu', # activation
-                 norm_type :str = 'BN',    # normalization
-                 depthwise :bool = False
-                ):
-        super(BasicConv, self).__init__()
-        self.depthwise = depthwise
-        use_bias = False if norm_type is not None else True
-        if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1, bias=use_bias)
-            self.norm = get_norm(norm_type, out_dim)
-        else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim, bias=use_bias)
-            self.norm1 = get_norm(norm_type, in_dim)
-            self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
-            self.norm2 = get_norm(norm_type, out_dim)
-        self.act  = get_activation(act_type)
-
-    def forward(self, x):
-        if not self.depthwise:
-            return self.act(self.norm(self.conv(x)))
-        else:
-            # Depthwise conv
-            x = self.norm1(self.conv1(x))
-            # Pointwise conv
-            x = self.act(self.norm2(self.conv2(x)))
-            return x
-
-
-# ---------------------------- Basic Modules ----------------------------
-class MDown(nn.Module):
-    def __init__(self,
-                 in_dim    :int,
-                 out_dim   :int,
-                 act_type  :str   = 'silu',
-                 norm_type :str   = 'BN',
-                 depthwise :bool  = False,
-                 ) -> None:
-        super().__init__()
-        inter_dim = out_dim // 2
-        self.downsample_1 = nn.Sequential(
-            nn.MaxPool2d((2, 2), stride=2),
-            BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        )
-        self.downsample_2 = nn.Sequential(
-            BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type),
-            BasicConv(inter_dim, inter_dim,
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        )
-
-    def forward(self, x):
-        x1 = self.downsample_1(x)
-        x2 = self.downsample_2(x)
-
-        return torch.cat([x1, x2], dim=1)
-
-class ELANLayer(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expansion  :float = 0.5,
-                 num_blocks :int   = 1,
-                 act_type   :str   = 'silu',
-                 norm_type  :str   = 'BN',
-                 depthwise  :bool  = False,
-                 ) -> None:
-        super(ELANLayer, self).__init__()
-        self.inter_dim = round(in_dim * expansion)
-        self.conv_layer_1 = BasicConv(in_dim, self.inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.conv_layer_2 = BasicConv(in_dim, self.inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.conv_layer_3 = BasicConv(self.inter_dim * 4, out_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.elan_layer_1 = nn.Sequential(*[BasicConv(self.inter_dim, self.inter_dim,
-                                                      kernel_size=3, padding=1,
-                                                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-                                                      for _ in range(num_blocks)])
-        self.elan_layer_2 = nn.Sequential(*[BasicConv(self.inter_dim, self.inter_dim,
-                                                      kernel_size=3, padding=1,
-                                                      act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-                                                      for _ in range(num_blocks)])
-
-    def forward(self, x):
-        # Input proj
-        x1 = self.conv_layer_1(x)
-        x2 = self.conv_layer_2(x)
-        x3 = self.elan_layer_1(x2)
-        x4 = self.elan_layer_2(x3)
-    
-        out = self.conv_layer_3(torch.cat([x1, x2, x3, x4], dim=1))
-
-        return out
-
-## PaFPN's ELAN-Block proposed by YOLOv7
-class ELANLayerFPN(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 expansions   :List = [0.5, 0.5],
-                 branch_width :int  = 4,
-                 branch_depth :int  = 1,
-                 act_type     :str  = 'silu',
-                 norm_type    :str  = 'BN',
-                 depthwise=False):
-        super(ELANLayerFPN, self).__init__()
-        # Basic parameters
-        inter_dim  = round(in_dim * expansions[0])
-        inter_dim2 = round(inter_dim * expansions[1]) 
-        # Network structure
-        self.cv1 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.cv2 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.cv3 = nn.ModuleList()
-        for idx in range(round(branch_width)):
-            if idx == 0:
-                cvs = [BasicConv(inter_dim, inter_dim2,
-                                 kernel_size=3, padding=1,
-                                 act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
-            else:
-                cvs = [BasicConv(inter_dim2, inter_dim2,
-                                 kernel_size=3, padding=1,
-                                 act_type=act_type, norm_type=norm_type, depthwise=depthwise)]
-            # deeper
-            if round(branch_depth) > 1:
-                for _ in range(1, round(branch_depth)):
-                    cvs.append(BasicConv(inter_dim2, inter_dim2, kernel_size=3, padding=1, act_type=act_type, norm_type=norm_type, depthwise=depthwise))
-                self.cv3.append(nn.Sequential(*cvs))
-            else:
-                self.cv3.append(cvs[0])
-
-        self.output_proj = BasicConv(inter_dim*2+inter_dim2*len(self.cv3), out_dim,
-                                     kernel_size=1, act_type=act_type, norm_type=norm_type)
-
-
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.cv2(x)
-        inter_outs = [x1, x2]
-        for m in self.cv3:
-            y1 = inter_outs[-1]
-            y2 = m(y1)
-            inter_outs.append(y2)
-        out = self.output_proj(torch.cat(inter_outs, dim=1))
-
-        return out
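
The `inter_dim * 4` input of `conv_layer_3` in `ELANLayer` comes from concatenating the two input projections with the two ELAN branch outputs; a quick shape check with hypothetical dims:

```Python
import torch

# Channel bookkeeping in ELANLayer: conv_layer_3 sees inter_dim * 4 channels.
in_dim, expansion = 128, 0.5
inter_dim = round(in_dim * expansion)      # 64
x1 = torch.randn(1, inter_dim, 40, 40)     # conv_layer_1(x)
x2 = torch.randn(1, inter_dim, 40, 40)     # conv_layer_2(x)
x3 = torch.randn(1, inter_dim, 40, 40)     # elan_layer_1(x2)
x4 = torch.randn(1, inter_dim, 40, 40)     # elan_layer_2(x3)
cat = torch.cat([x1, x2, x3, x4], dim=1)
print(cat.shape[1], "==", inter_dim * 4)   # 256 == 256
```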

+ 0 - 173
yolo/models/yolov7_af/yolov7_af_head.py

@@ -1,173 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .yolov7_af_basic import BasicConv
-except:
-    from  yolov7_af_basic import BasicConv
-
-
-## Single-level Detection Head
-class DetHead(nn.Module):
-    def __init__(self,
-                 in_dim       :int  = 256,
-                 cls_head_dim :int  = 256,
-                 reg_head_dim :int  = 256,
-                 num_cls_head :int  = 2,
-                 num_reg_head :int  = 2,
-                 act_type     :str  = "silu",
-                 norm_type    :str  = "BN",
-                 depthwise    :bool = False):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.depthwise = depthwise
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = cls_head_dim
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    BasicConv(in_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-            else:
-                cls_feats.append(
-                    BasicConv(self.cls_head_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = reg_head_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    BasicConv(in_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-            else:
-                reg_feats.append(
-                    BasicConv(self.reg_head_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        """
-            in_feats: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-## Multi-level Detection Head
-class Yolov7DetHead(nn.Module):
-    def __init__(self, cfg, in_dims):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [DetHead(in_dim       = in_dims[level],
-                     cls_head_dim = round(cfg.head_dim * cfg.width),
-                     reg_head_dim = round(cfg.head_dim * cfg.width),
-                     num_cls_head = cfg.num_cls_head,
-                     num_reg_head = cfg.num_reg_head,
-                     act_type     = cfg.head_act,
-                     norm_type    = cfg.head_norm,
-                     depthwise    = cfg.head_depthwise)
-                     for level in range(cfg.num_levels)
-                     ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.cls_head_dim = cfg.head_dim
-        self.reg_head_dim = cfg.head_dim
-
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        return cls_feats, reg_feats
-
-
-if __name__=='__main__':
-    import time
-    from thop import profile
-    # Model config
-    
-    # YOLOv7-Base config
-    class Yolov7BaseConfig(object):
-        def __init__(self) -> None:
-            # ---------------- Model config ----------------
-            self.width    = 0.50
-            self.out_stride = [8, 16, 32]
-            self.max_stride = 32
-            self.num_levels = 3
-            ## Head
-            self.head_act  = 'lrelu'
-            self.head_norm = 'BN'
-            self.head_depthwise = False
-            self.head_dim  = 256
-            self.num_cls_head   = 2
-            self.num_reg_head   = 2
-
-    cfg = Yolov7BaseConfig()
-    # Build a head
-    pyramid_feats = [torch.randn(1, cfg.head_dim, 80, 80),
-                     torch.randn(1, cfg.head_dim, 40, 40),
-                     torch.randn(1, cfg.head_dim, 20, 20)]
-    head = Yolov7DetHead(cfg, [cfg.head_dim]*3)
-
-
-    # Inference
-    t0 = time.time()
-    cls_feats, reg_feats = head(pyramid_feats)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    print("====== Yolov7 Head output ======")
-    for level, (cls_f, reg_f) in enumerate(zip(cls_feats, reg_feats)):
-        print("- Level-{} : ".format(level), cls_f.shape, reg_f.shape)
-
-    flops, params = profile(head, inputs=(pyramid_feats, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))
-      

+ 0 - 119
yolo/models/yolov7_af/yolov7_af_neck.py

@@ -1,119 +0,0 @@
-import torch
-import torch.nn as nn
-from .yolov7_af_basic import BasicConv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv7-AF by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code is adapted from https://github.com/ultralytics/yolov7-AF
-    """
-    def __init__(self, cfg, in_dim, out_dim, expansion=0.5):
-        super().__init__()
-        ## ----------- Basic Parameters -----------
-        inter_dim = round(in_dim * expansion)
-        self.out_dim = out_dim
-        ## ----------- Network Parameters -----------
-        self.cv1 = BasicConv(in_dim, inter_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.cv2 = BasicConv(inter_dim * 4, out_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.m = nn.MaxPool2d(kernel_size=cfg.spp_pooling_size,
-                              stride=1,
-                              padding=cfg.spp_pooling_size // 2)
-
-        # Initialize all layers
-        self.init_weights()
-
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
-
-# SPPF block with CSP module
-class SPPFBlockCSP(nn.Module):
-    """
-        CSP Spatial Pyramid Pooling Block
-    """
-    def __init__(self, cfg, in_dim, out_dim):
-        super(SPPFBlockCSP, self).__init__()
-        inter_dim = int(in_dim * cfg.neck_expand_ratio)
-        self.out_dim = out_dim
-        self.cv1 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.cv2 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.module = nn.Sequential(
-            BasicConv(inter_dim, inter_dim, kernel_size=3, padding=1, 
-                      act_type=cfg.neck_act, norm_type=cfg.neck_norm, depthwise=cfg.neck_depthwise),
-            SPPF(cfg, inter_dim, inter_dim, expansion=1.0),
-            BasicConv(inter_dim, inter_dim, kernel_size=3, padding=1, 
-                      act_type=cfg.neck_act, norm_type=cfg.neck_norm, depthwise=cfg.neck_depthwise),
-                      )
-        self.cv3 = BasicConv(inter_dim * 2, self.out_dim, kernel_size=1, act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-
-        # Initialize all layers
-        self.init_weights()
-
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        x1 = self.cv1(x)
-        x2 = self.module(self.cv2(x))
-        y = self.cv3(torch.cat([x1, x2], dim=1))
-
-        return y
-
-
-if __name__=='__main__':
-    import time
-    from thop import profile
-    # Model config
-    
-    # YOLOv7-AF-Base config
-    class Yolov7AFBaseConfig(object):
-        def __init__(self) -> None:
-            # ---------------- Model config ----------------
-            self.out_stride = 32
-            self.max_stride = 32
-            ## Neck
-            self.neck_act       = 'lrelu'
-            self.neck_norm      = 'BN'
-            self.neck_depthwise = False
-            self.neck_expand_ratio = 0.5
-            self.spp_pooling_size  = 5
-
-    cfg = Yolov7AFBaseConfig()
-    # Build a head
-    in_dim  = 512
-    out_dim = 512
-    neck = SPPF(cfg, in_dim, out_dim)
-
-    # Inference
-    x = torch.randn(1, in_dim, 20, 20)
-    t0 = time.time()
-    output = neck(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    print('Neck output: ', output.shape)
-
-    flops, params = profile(neck, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

+ 0 - 184
yolo/models/yolov7_af/yolov7_af_pafpn.py

@@ -1,184 +0,0 @@
-from typing import List
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-try:
-    from .yolov7_af_basic import BasicConv, ELANLayerFPN, MDown
-except:
-    from  yolov7_af_basic import BasicConv, ELANLayerFPN, MDown
-
-
-# Yolov7 af PaFPN
-class Yolov7PaFPN(nn.Module):
-    def __init__(self, cfg, in_dims: List = [512, 1024, 512]):
-        super(Yolov7PaFPN, self).__init__()
-        # ----------------------------- Basic parameters -----------------------------
-        self.in_dims = in_dims
-        c3, c4, c5 = in_dims
-
-        # ----------------------------- Yolov7's Top-down FPN -----------------------------
-        ## P5 -> P4
-        self.reduce_layer_1 = BasicConv(c5, round(256*cfg.width),
-                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.reduce_layer_2 = BasicConv(c4, round(256*cfg.width),
-                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.top_down_layer_1 = ELANLayerFPN(in_dim       = round(256*cfg.width) + round(256*cfg.width),
-                                             out_dim      = round(256*cfg.width),
-                                             expansions   = cfg.fpn_expansions,
-                                             branch_width = cfg.fpn_block_bw,
-                                             branch_depth = cfg.fpn_block_dw,
-                                             act_type     = cfg.fpn_act,
-                                             norm_type    = cfg.fpn_norm,
-                                             depthwise    = cfg.fpn_depthwise,
-                                             )
-        ## P4 -> P3
-        self.reduce_layer_3 = BasicConv(round(256*cfg.width), round(128*cfg.width),
-                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.reduce_layer_4 = BasicConv(c3, round(128*cfg.width),
-                                        kernel_size=1, act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.top_down_layer_2 = ELANLayerFPN(in_dim       = round(128*cfg.width) + round(128*cfg.width),
-                                             out_dim      = round(128*cfg.width),
-                                             expansions   = cfg.fpn_expansions,
-                                             branch_width = cfg.fpn_block_bw,
-                                             branch_depth = cfg.fpn_block_dw,
-                                             act_type     = cfg.fpn_act,
-                                             norm_type    = cfg.fpn_norm,
-                                             depthwise    = cfg.fpn_depthwise,
-                                             )
-        # ----------------------------- Yolov7's Bottom-up PAN -----------------------------
-        ## P3 -> P4
-        self.downsample_layer_1 = MDown(round(128*cfg.width), round(256*cfg.width),
-                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.bottom_up_layer_1 = ELANLayerFPN(in_dim       = round(256*cfg.width) + round(256*cfg.width),
-                                              out_dim      = round(256*cfg.width),
-                                              expansions   = cfg.fpn_expansions,
-                                              branch_width = cfg.fpn_block_bw,
-                                              branch_depth = cfg.fpn_block_dw,
-                                              act_type     = cfg.fpn_act,
-                                              norm_type    = cfg.fpn_norm,
-                                              depthwise    = cfg.fpn_depthwise,
-                                              )
-        ## P4 -> P5
-        self.downsample_layer_2 = MDown(round(256*cfg.width), round(512*cfg.width),
-                                        act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.bottom_up_layer_2 = ELANLayerFPN(in_dim       = round(512*cfg.width) + c5,
-                                              out_dim      = round(512*cfg.width),
-                                              expansions   = cfg.fpn_expansions,
-                                              branch_width = cfg.fpn_block_bw,
-                                              branch_depth = cfg.fpn_block_dw,
-                                              act_type     = cfg.fpn_act,
-                                              norm_type    = cfg.fpn_norm,
-                                              depthwise    = cfg.fpn_depthwise,
-                                              )
-
-        # ----------------------------- Head conv layers -----------------------------
-        ## Head convs
-        self.head_conv_1 = BasicConv(round(128*cfg.width), round(256*cfg.width),
-                                     kernel_size=3, padding=1, stride=1,
-                                     act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-        self.head_conv_2 = BasicConv(round(256*cfg.width), round(512*cfg.width),
-                                     kernel_size=3, padding=1, stride=1,
-                                     act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-        self.head_conv_3 = BasicConv(round(512*cfg.width), round(1024*cfg.width),
-                                     kernel_size=3, padding=1, stride=1,
-                                     act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-
-        # ---------------------- Yolox's output projection ----------------------
-        self.out_layers = nn.ModuleList([
-            BasicConv(in_dim, round(cfg.head_dim*cfg.width), kernel_size=1,
-                      act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-                      for in_dim in [round(256*cfg.width), round(512*cfg.width), round(1024*cfg.width)]
-                      ])
-        self.out_dims = [round(cfg.head_dim*cfg.width)] * 3
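-        # All three levels are projected to one shared channel width
-        # (head_dim * width), so a single head structure can be applied
-        # uniformly to P3, P4 and P5.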
-
-        # Initialize all layers
-        self.init_weights()
-
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # Reset the Conv2d parameters to their PyTorch defaults so the
-                # initialization matches the official implementation.
-                m.reset_parameters()
-
-    def forward(self, features):
-        c3, c4, c5 = features
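-        # For reference: with a 640x640 input and strides [8, 16, 32], the
-        # inputs are roughly (B, C3, 80, 80), (B, C4, 40, 40), (B, C5, 20, 20).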
-
-        # ------------------ Top down FPN ------------------
-        ## P5 -> P4
-        p5 = self.reduce_layer_1(c5)
-        p5_up = F.interpolate(p5, scale_factor=2.0)
-        p4 = self.reduce_layer_2(c4)
-        p4 = self.top_down_layer_1(torch.cat([p5_up, p4], dim=1))
-
-        ## P4 -> P3
-        p4_in = self.reduce_layer_3(p4)
-        p4_up = F.interpolate(p4_in, scale_factor=2.0)
-        p3 = self.reduce_layer_4(c3)
-        p3 = self.top_down_layer_2(torch.cat([p4_up, p3], dim=1))
-
-        # ------------------ Bottom up PAN ------------------
-        ## P3 -> P4
-        p3_ds = self.downsample_layer_1(p3)
-        p4 = torch.cat([p3_ds, p4], dim=1)
-        p4 = self.bottom_up_layer_1(p4)
-
-        ## P4 -> P5
-        p4_ds = self.downsample_layer_2(p4)
-        p5 = torch.cat([p4_ds, c5], dim=1)
-        p5 = self.bottom_up_layer_2(p5)
-
-        out_feats = [self.head_conv_1(p3), self.head_conv_2(p4), self.head_conv_3(p5)]
-            
-        # output proj layers
-        out_feats_proj = []
-        for feat, layer in zip(out_feats, self.out_layers):
-            out_feats_proj.append(layer(feat))
-            
-        return out_feats_proj
-
-
-if __name__=='__main__':
-    import time
-    from thop import profile
-    # Model config
-    
-    # YOLOv7-Base config
-    class Yolov7BaseConfig(object):
-        def __init__(self) -> None:
-            # ---------------- Model config ----------------
-            self.width    = 0.50
-            self.depth    = 0.34
-            self.out_stride = [8, 16, 32]
-            self.max_stride = 32
-            self.num_levels = 3
-            ## FPN
-            self.fpn_act  = 'silu'
-            self.fpn_norm = 'BN'
-            self.fpn_depthwise = False
-            ## Head
-            self.head_dim = 256
-
-    cfg = Yolov7BaseConfig()
-    # Build an FPN
-    in_dims  = [128, 256, 512]
-    fpn = Yolov7PaFPN(cfg, in_dims)
-
-    # Inference
-    x = [torch.randn(1, in_dims[0], 80, 80),
-         torch.randn(1, in_dims[1], 40, 40),
-         torch.randn(1, in_dims[2], 20, 20)]
-    t0 = time.time()
-    output = fpn(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    print('====== FPN output ====== ')
-    for level, feat in enumerate(output):
-        print("- Level-{} : ".format(level), feat.shape)
-
-    flops, params = profile(fpn, inputs=(x, ), verbose=False)
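-    # Note: thop's profile() returns MACs, hence the x2 below to report GFLOPs.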
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))

+ 0 - 198
yolo/models/yolov7_af/yolov7_af_pred.py

@@ -1,198 +0,0 @@
-import torch
-import torch.nn as nn
-from typing import List
-
-# -------------------- Detection Pred Layer --------------------
-## Single-level pred layer
-class AFDetPredLayer(nn.Module):
-    def __init__(self,
-                 cls_dim      :int,
-                 reg_dim      :int,
-                 stride       :int,
-                 num_classes  :int,
-                 ):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.stride  = stride
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.num_classes = num_classes
-
-        # --------- Network Parameters ----------
-        self.obj_pred = nn.Conv2d(self.cls_dim, 1, kernel_size=1)
-        self.cls_pred = nn.Conv2d(self.cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(self.reg_dim, 4, kernel_size=1)                
-
-        self.init_bias()
-        
-    def init_bias(self):
-        # Init bias
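-        # Prior-probability trick (as in RetinaNet): bias = -log((1 - p) / p)
-        # gives sigmoid(bias) = p, so obj/cls scores start near p = 0.01 and
-        # the loss stays stable during the first iterations.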
-        init_prob = 0.01
-        bias_value = -torch.log(torch.tensor((1. - init_prob) / init_prob))
-        # obj pred
-        b = self.obj_pred.bias.view(1, -1)
-        b.data.fill_(bias_value.item())
-        self.obj_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # cls pred
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(bias_value.item())
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        w = self.reg_pred.weight
-        w.data.fill_(0.)
-        self.reg_pred.weight = torch.nn.Parameter(w, requires_grad=True)
-
-    def generate_anchors(self, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        fmp_h, fmp_w = fmp_size
-        # indexing="ij" keeps the original row-major behavior (and silences
-        # the deprecation warning on torch >= 1.10)
-        anchor_y, anchor_x = torch.meshgrid([torch.arange(fmp_h), torch.arange(fmp_w)], indexing="ij")
-
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors = anchors + 0.5
-        anchors = anchors * self.stride
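-        # Example: an 80x80 map at stride 8 yields 6400 anchor points, the
-        # grid-cell centers (x + 0.5) * 8 in input-image coordinates.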
-
-        return anchors
-        
-    def forward(self, cls_feat, reg_feat):
-        # Prediction layers
-        obj_pred = self.obj_pred(reg_feat)
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        # Generate grid coordinates (anchor points)
-        B, _, H, W = cls_pred.size()
-        fmp_size = [H, W]
-        anchors = self.generate_anchors(fmp_size)
-        anchors = anchors.to(cls_pred.device)
-
-        # Reshape the predictions for easier downstream processing:
-        # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]
-        obj_pred = obj_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 1)
-        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4)
-        
-        # Decode bounding-box coordinates
-        cxcy_pred = reg_pred[..., :2] * self.stride + anchors
-        bwbh_pred = torch.exp(reg_pred[..., 2:]) * self.stride
-        pred_x1y1 = cxcy_pred - bwbh_pred * 0.5
-        pred_x2y2 = cxcy_pred + bwbh_pred * 0.5
-        box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
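-        # i.e. center = anchor + offset * stride and size = exp(pred) * stride,
-        # with (cx, cy, w, h) then converted to corners (x1, y1, x2, y2).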
-
-        # output dict
-        outputs = {"pred_obj": obj_pred,       # (torch.Tensor) [B, M, 1]
-                   "pred_cls": cls_pred,       # (torch.Tensor) [B, M, C]
-                   "pred_reg": reg_pred,       # (torch.Tensor) [B, M, 4]
-                   "pred_box": box_pred,       # (torch.Tensor) [B, M, 4]
-                   "anchors" : anchors,        # (torch.Tensor) [M, 2]
-                   "fmp_size": fmp_size,
-                   "stride"  : self.stride,    # (Int)
-                   }
-
-        return outputs
-
-## Multi-level pred layer
-class Yolov7AFDetPredLayer(nn.Module):
-    def __init__(self, cfg):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cfg = cfg
-
-        # ----------- Network Parameters -----------
-        ## pred layers
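-        # One single-level head per pyramid level, each bound to its own
-        # stride (out_stride[level]), so anchors are generated per level.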
-        self.multi_level_preds = nn.ModuleList(
-            [AFDetPredLayer(cls_dim      = round(cfg.head_dim * cfg.width),
-                            reg_dim      = round(cfg.head_dim * cfg.width),
-                            stride       = cfg.out_stride[level],
-                            num_classes  = cfg.num_classes,)
-                            for level in range(cfg.num_levels)
-                            ])
-
-    def forward(self, cls_feats, reg_feats):
-        all_anchors = []
-        all_fmp_sizes = []
-        all_obj_preds = []
-        all_cls_preds = []
-        all_reg_preds = []
-        all_box_preds = []
-        for level in range(self.cfg.num_levels):
-            # -------------- Single-level prediction --------------
-            outputs = self.multi_level_preds[level](cls_feats[level], reg_feats[level])
-
-            # collect results
-            all_obj_preds.append(outputs["pred_obj"])
-            all_cls_preds.append(outputs["pred_cls"])
-            all_reg_preds.append(outputs["pred_reg"])
-            all_box_preds.append(outputs["pred_box"])
-            all_fmp_sizes.append(outputs["fmp_size"])
-            all_anchors.append(outputs["anchors"])
-        
-        # output dict
-        outputs = {"pred_obj":  all_obj_preds,         # List(Tensor) [B, M, 1]
-                   "pred_cls":  all_cls_preds,         # List(Tensor) [B, M, C]
-                   "pred_reg":  all_reg_preds,         # List(Tensor) [B, M, 4*(reg_max)]
-                   "pred_box":  all_box_preds,         # List(Tensor) [B, M, 4]
-                   "fmp_sizes": all_fmp_sizes,         # List(Tensor) [M, 1]
-                   "anchors":   all_anchors,           # List(Tensor) [M, 2]
-                   "strides":   self.cfg.out_stride,   # List(Int) = [8, 16, 32]
-                   }
-
-        return outputs
-
-
-if __name__=='__main__':
-    import time
-    from thop import profile
-    # Model config
-    
-    # YOLOv7AF-Base config
-    class Yolov7AFBaseConfig(object):
-        def __init__(self) -> None:
-            # ---------------- Model config ----------------
-            self.width    = 1.0
-            self.depth    = 1.0
-            self.out_stride = [8, 16, 32]
-            self.max_stride = 32
-            self.num_levels = 3
-            ## Head
-            self.head_dim  = 256
-
-    cfg = Yolov7AFBaseConfig()
-    cfg.num_classes = 20
-    # Build a pred layer
-    pred = Yolov7AFDetPredLayer(cfg)
-
-    # Inference
-    cls_feats = [torch.randn(1, cfg.head_dim, 80, 80),
-                 torch.randn(1, cfg.head_dim, 40, 40),
-                 torch.randn(1, cfg.head_dim, 20, 20),]
-    reg_feats = [torch.randn(1, cfg.head_dim, 80, 80),
-                 torch.randn(1, cfg.head_dim, 40, 40),
-                 torch.randn(1, cfg.head_dim, 20, 20),]
-    t0 = time.time()
-    output = pred(cls_feats, reg_feats)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    print('====== Pred output ====== ')
-    pred_obj = output["pred_obj"]
-    pred_cls = output["pred_cls"]
-    pred_reg = output["pred_reg"]
-    pred_box = output["pred_box"]
-    anchors  = output["anchors"]
-    
-    for level in range(cfg.num_levels):
-        print("- Level-{} : objectness       -> {}".format(level, pred_obj[level].shape))
-        print("- Level-{} : classification   -> {}".format(level, pred_cls[level].shape))
-        print("- Level-{} : delta regression -> {}".format(level, pred_reg[level].shape))
-        print("- Level-{} : bbox regression  -> {}".format(level, pred_box[level].shape))
-        print("- Level-{} : anchor boxes     -> {}".format(level, anchors[level].shape))
-
-    flops, params = profile(pred, inputs=(cls_feats, reg_feats, ), verbose=False)
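-    # Note: thop's profile() returns MACs, hence the x2 below to report GFLOPs.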
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))