yjh0410 1 year ago
parent
commit
f6aa3d89dd

+ 0 - 4
yolo/config/__init__.py

@@ -9,7 +9,6 @@ from .yolov8_config    import build_yolov8_config
 from .gelan_config     import build_gelan_config
 from .rtdetr_config    import build_rtdetr_config
 
-from .yolox2_config    import build_yolox2_config
 
 def build_config(args):
     print('==============================')
@@ -35,9 +34,6 @@ def build_config(args):
     elif 'rtdetr' in args.model:
         cfg = build_rtdetr_config(args)
 
-    elif 'yolox2' in args.model:
-        cfg = build_yolox2_config(args)
-
     else:
         raise NotImplementedError("Unknown model config: {}".format(args.model))
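`build_config` dispatches on substring membership (`'rtdetr' in args.model`), so the order of the `elif` chain matters whenever one model token is a substring of another; with the `yolox2` branch deleted, any `yolox2_*` name now falls through to the `NotImplementedError`. A minimal sketch of a table-driven alternative, assuming the builder imports at the top of this file (the registry name is hypothetical):

```python
# Hypothetical registry variant of build_config. Insertion order still
# decides which builder wins when tokens overlap (dicts preserve order).
CONFIG_BUILDERS = {
    'gelan':  build_gelan_config,
    'rtdetr': build_rtdetr_config,
    # the 'yolox2' entry was deleted together with yolox2_config.py
}

def build_config(args):
    for token, builder in CONFIG_BUILDERS.items():
        if token in args.model:
            return builder(args)
    raise NotImplementedError("Unknown model config: {}".format(args.model))
```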
     

+ 0 - 191
yolo/config/yolox2_config.py

@@ -1,191 +0,0 @@
-# yolo Config
-
-
-def build_yolox2_config(args):
-    if   args.model == 'yolox2_n':
-        return Yolox2NConfig()
-    elif args.model == 'yolox2_s':
-        return Yolox2SConfig()
-    elif args.model == 'yolox2_m':
-        return Yolox2MConfig()
-    elif args.model == 'yolox2_l':
-        return Yolox2LConfig()
-    elif args.model == 'yolox2_x':
-        return Yolox2XConfig()
-    else:
-        raise NotImplementedError("No config for model: {}".format(args.model))
-    
-# YOLOx2-Base config
-class Yolox2BaseConfig(object):
-    def __init__(self) -> None:
-        # ---------------- Model config ----------------
-        self.width    = 1.0
-        self.depth    = 1.0
-        self.out_stride = [8, 16, 32]
-        self.max_stride = 32
-        self.num_levels = 3
-        self.scale      = "b"
-        ## Backbone
-        self.bk_act   = 'silu'
-        self.bk_norm  = 'BN'
-        self.bk_depthwise = False
-        self.use_pretrained = True
-        ## Neck
-        self.neck_act       = 'silu'
-        self.neck_norm      = 'BN'
-        self.neck_depthwise = False
-        self.neck_expand_ratio = 0.5
-        self.spp_pooling_size  = 5
-        ## FPN
-        self.fpn_act  = 'silu'
-        self.fpn_norm = 'BN'
-        self.fpn_depthwise = False
-        ## Head
-        self.head_act  = 'silu'
-        self.head_norm = 'BN'
-        self.head_depthwise = False
-        self.head_dim       = 256
-        self.num_cls_head   = 2
-        self.num_reg_head   = 2
-
-        # ---------------- Post-process config ----------------
-        ## Post process
-        self.val_topk = 1000
-        self.val_conf_thresh = 0.001
-        self.val_nms_thresh  = 0.7
-        self.test_topk = 100
-        self.test_conf_thresh = 0.3
-        self.test_nms_thresh  = 0.5
-
-        # ---------------- Assignment config ----------------
-        ## Matcher
-        self.ota_soft_center_radius = 3.0
-        self.ota_topk_candidates = 13
-        ## Loss weight
-        self.loss_cls = 1.0
-        self.loss_box = 2.0
-
-        # ---------------- ModelEMA config ----------------
-        self.use_ema = True
-        self.ema_decay = 0.9998
-        self.ema_tau   = 2000
-
-        # ---------------- Optimizer config ----------------
-        self.trainer      = 'yolo'
-        self.optimizer    = 'adamw'
-        self.per_image_lr = 0.001 / 64
-        self.base_lr      = None      # base_lr = per_image_lr * batch_size
-        self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
-        self.momentum     = 0.9
-        self.weight_decay = 0.05
-        self.clip_max_norm   = 35.0
-        self.warmup_bias_lr  = 0.1
-        self.warmup_momentum = 0.8
-
-        # ---------------- Lr Scheduler config ----------------
-        self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
-        self.max_epoch    = 300
-        self.eval_epoch   = 10
-        self.no_aug_epoch = 20
-
-        # ---------------- Data process config ----------------
-        self.aug_type = 'yolo'
-        self.box_format = 'xyxy'
-        self.normalize_coords = False
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.0           # approximated by the YOLOX's mixup
-        self.multi_scale = [0.5, 1.25]   # multi scale: [img_size * 0.5, img_size * 1.25]
-        ## Pixel mean & std
-        self.pixel_mean = [0., 0., 0.]
-        self.pixel_std  = [255., 255., 255.]
-        ## Transforms
-        self.train_img_size = 640
-        self.test_img_size  = 640
-        self.use_ablu = True
-        self.affine_params = {
-            'degrees': 0.0,
-            'translate': 0.2,
-            'scale': [0.1, 2.0],
-            'shear': 0.0,
-            'perspective': 0.0,
-            'hsv_h': 0.015,
-            'hsv_s': 0.7,
-            'hsv_v': 0.4,
-        }
-
-    def print_config(self):
-        config_dict = {key: value for key, value in self.__dict__.items() if not key.startswith('__')}
-        for k, v in config_dict.items():
-            print("{} : {}".format(k, v))
-
-# YOLOx2-N
-class Yolox2NConfig(Yolox2BaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.width = 0.25
-        self.depth = 0.34
-        self.scale = "n"
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.5
-
-# YOLOx2-S
-class Yolox2SConfig(Yolox2BaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.width = 0.50
-        self.depth = 0.34
-        self.scale = "s"
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.0
-        self.copy_paste  = 0.5
-
-# YOLOx2-M
-class Yolox2MConfig(Yolox2BaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.width = 0.75
-        self.depth = 0.67
-        self.scale = "m"
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.1
-        self.copy_paste  = 0.5
-
-# YOLOx2-L
-class Yolox2LConfig(Yolox2BaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.width = 1.0
-        self.depth = 1.0
-        self.scale = "l"
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.1
-        self.copy_paste  = 0.5
-
-# YOLOx2-X
-class Yolox2XConfig(Yolox2BaseConfig):
-    def __init__(self) -> None:
-        super().__init__()
-        # ---------------- Model config ----------------
-        self.width = 1.25
-        self.depth = 1.34
-        self.scale = "x"
-
-        # ---------------- Data process config ----------------
-        self.mosaic_prob = 1.0
-        self.mixup_prob  = 0.1
-        self.copy_paste  = 0.5
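The removed base config derives the learning rate from a per-image rate: `base_lr = per_image_lr * batch_size` with `per_image_lr = 0.001 / 64`, and `min_lr = base_lr * min_lr_ratio`. Worked through for the effective batch size of 256 that the (also removed) README targets via gradient accumulation:

```python
# Worked example of the removed config's LR scaling rule.
per_image_lr = 0.001 / 64                 # = 1.5625e-05 per image
batch_size   = 256                        # effective batch via gradient accumulation
base_lr      = per_image_lr * batch_size  # = 0.004
min_lr       = base_lr * 0.01             # min_lr_ratio = 0.01 -> 4e-05
print(base_lr, min_lr)
```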

+ 0 - 4
yolo/models/__init__.py

@@ -12,7 +12,6 @@ from .yolov8.build    import build_yolov8
 from .gelan.build     import build_gelan
 from .rtdetr.build    import build_rtdetr
 
-from .yolox2.build import build_yolox2
 
 # build object detector
 def build_model(args, cfg, is_val=False):
@@ -45,9 +44,6 @@ def build_model(args, cfg, is_val=False):
     elif 'rtdetr' in args.model:
         model, criterion = build_rtdetr(cfg, is_val)
 
-    elif 'yolox2' in args.model:
-        model, criterion = build_yolox2(cfg, is_val)
-
     if is_val:
         # ------------ Load pretrained weight ------------
         if args.pretrained is not None:
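Unlike `build_config`, the `elif` chain in `build_model` has no trailing `else` in the context shown, so once the `yolox2` branch is gone an unrecognized `args.model` leaves `model` and `criterion` unbound and fails later with a `NameError` at the `if is_val:` block. A one-line guard would make the failure explicit (sketch only):

```python
    else:
        raise NotImplementedError("Unknown model: {}".format(args.model))
```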

+ 7 - 5
yolo/models/yolov5/yolov5_head.py

@@ -133,11 +133,13 @@ if __name__=='__main__':
     from thop import profile
     # Model config
     
-    # YOLOv3-Base config
+    # YOLOv5-Base config
     class Yolov5BaseConfig(object):
         def __init__(self) -> None:
             # ---------------- Model config ----------------
-            self.out_stride = 32
+            self.width    = 0.50
+            self.depth    = 0.34
+            self.out_stride = [8, 16, 32]
             self.max_stride = 32
             self.num_levels = 3
             ## Head
@@ -161,10 +163,10 @@ if __name__=='__main__':
     cls_feats, reg_feats = head(pyramid_feats)
     t1 = time.time()
     print('Time: ', t1 - t0)
-    for cls_f, reg_f in zip(cls_feats, reg_feats):
-        print(cls_f.shape, reg_f.shape)
+    print("====== Yolov5 Head output ======")
+    for level, (cls_f, reg_f) in enumerate(zip(cls_feats, reg_feats)):
+        print("- Level-{} : ".format(level), cls_f.shape, reg_f.shape)
 
-    print('==============================')
     flops, params = profile(head, inputs=(pyramid_feats, ), verbose=False)
     print('==============================')
     print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
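The `flops / 1e9 * 2` seen in all of these new `__main__` harnesses deserves a word: `thop.profile` counts multiply-accumulate operations (MACs), and the factor of two converts MACs to FLOPs. A self-contained check on a single convolution whose MAC count is easy to compute by hand:

```python
import torch
import torch.nn as nn
from thop import profile

# 3x3 conv, no bias: MACs = k*k*Cin*Cout*H*W = 9 * 16 * 32 * 20 * 20 = 1,843,200
conv = nn.Conv2d(16, 32, kernel_size=3, padding=1, bias=False)
x = torch.randn(1, 16, 20, 20)

macs, params = profile(conv, inputs=(x,), verbose=False)
print(macs)               # 1843200.0 -> thop reports MACs
print(macs * 2 / 1e9)     # FLOPs in G, matching the 'flops / 1e9 * 2' convention
```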

+ 42 - 1
yolo/models/yolov5/yolov5_neck.py

@@ -1,7 +1,10 @@
 import torch
 import torch.nn as nn
 
-from .yolov5_basic import BasicConv
+try:
+    from .yolov5_basic import BasicConv
+except ImportError:
+    from yolov5_basic import BasicConv
 
 
 # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
@@ -31,3 +34,41 @@ class SPPF(nn.Module):
         y2 = self.m(y1)
 
         return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv5-Base config
+    class Yolov5BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.out_stride = 32
+            self.max_stride = 32
+            ## Neck
+            self.neck_act       = 'lrelu'
+            self.neck_norm      = 'BN'
+            self.neck_depthwise = False
+            self.neck_expand_ratio = 0.5
+            self.spp_pooling_size  = 5
+
+    cfg = Yolov5BaseConfig()
+    # Build a neck
+    in_dim  = 512
+    out_dim = 512
+    neck = SPPF(cfg, in_dim, out_dim)
+
+    # Inference
+    x = torch.randn(1, in_dim, 20, 20)
+    t0 = time.time()
+    output = neck(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('Neck output: ', output.shape)
+
+    flops, params = profile(neck, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))
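The SPPF being smoke-tested here chains one `k=5` stride-1 max-pool three times and concatenates the intermediates, which is equivalent to the original SPP's parallel 5/9/13 pools: two stacked 5x5 windows cover a 9x9 receptive field, three cover 13x13, while the chained form reuses computation. The equivalence can be checked independently of the repo code:

```python
import torch
import torch.nn as nn

x  = torch.randn(1, 8, 20, 20)
m5 = nn.MaxPool2d(5, stride=1, padding=2)

y1 = m5(x)    # 5x5 window
y2 = m5(y1)   # same result as one 9x9 window
y3 = m5(y2)   # same result as one 13x13 window

assert torch.equal(y2, nn.MaxPool2d(9,  stride=1, padding=4)(x))
assert torch.equal(y3, nn.MaxPool2d(13, stride=1, padding=6)(x))
print("chained 5x5 max-pools == parallel 9x9 / 13x13 max-pools")
```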

+ 48 - 1
yolo/models/yolov5/yolov5_pafpn.py

@@ -3,7 +3,10 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
-from .yolov5_basic import BasicConv, CSPBlock
+try:
+    from .yolov5_basic import BasicConv, CSPBlock
+except ImportError:
+    from yolov5_basic import BasicConv, CSPBlock
 
 
 # Yolov5FPN
@@ -105,3 +108,47 @@ class Yolov5PaFPN(nn.Module):
             out_feats_proj.append(layer(feat))
             
         return out_feats_proj
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv5-Base config
+    class Yolov5BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 0.50
+            self.depth    = 0.34
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## FPN
+            self.fpn_act  = 'silu'
+            self.fpn_norm = 'BN'
+            self.fpn_depthwise = False
+            ## Head
+            self.head_dim = 256
+
+    cfg = Yolov5BaseConfig()
+    # Build a PaFPN
+    in_dims  = [128, 256, 512]
+    fpn = Yolov5PaFPN(cfg, in_dims)
+
+    # Inference
+    x = [torch.randn(1, in_dims[0], 80, 80),
+         torch.randn(1, in_dims[1], 40, 40),
+         torch.randn(1, in_dims[2], 20, 20)]
+    t0 = time.time()
+    output = fpn(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('====== FPN output ====== ')
+    for level, feat in enumerate(output):
+        print("- Level-{} : ".format(level), feat.shape)
+
+    flops, params = profile(fpn, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))
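The `try`/`except` import shim added at the top of these modules exists for the new `__main__` smoke tests: relative imports (`from .yolov5_basic import ...`) only work when the file is loaded as part of the package, so running `python yolov5_pafpn.py` directly needs a same-directory fallback. The pattern on its own, with the reasoning spelled out:

```python
try:
    from .yolov5_basic import BasicConv, CSPBlock   # normal package import
except ImportError:
    # Running the file directly (no parent package): the relative import
    # fails, so fall back to importing from the current directory instead.
    from yolov5_basic import BasicConv, CSPBlock
```

Catching `ImportError` specifically, rather than using a bare `except:`, keeps unrelated failures raised while executing `yolov5_basic.py` itself from being silently rerouted.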

+ 56 - 0
yolo/models/yolov5/yolov5_pred.py

@@ -155,3 +155,59 @@ class Yolov5DetPredLayer(nn.Module):
                    }
 
         return outputs
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv5-Base config
+    class Yolov5BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 1.0
+            self.depth    = 1.0
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## Head
+            self.head_dim  = 256
+            self.anchor_size = {0: [[10, 13],   [16, 30],   [33, 23]],
+                                1: [[30, 61],   [62, 45],   [59, 119]],
+                                2: [[116, 90],  [156, 198], [373, 326]]}
+
+    cfg = Yolov5BaseConfig()
+    cfg.num_classes = 20
+    # Build a pred layer
+    pred = Yolov5DetPredLayer(cfg)
+
+    # Inference
+    cls_feats = [torch.randn(1, cfg.head_dim, 80, 80),
+                 torch.randn(1, cfg.head_dim, 40, 40),
+                 torch.randn(1, cfg.head_dim, 20, 20),]
+    reg_feats = [torch.randn(1, cfg.head_dim, 80, 80),
+                 torch.randn(1, cfg.head_dim, 40, 40),
+                 torch.randn(1, cfg.head_dim, 20, 20),]
+    t0 = time.time()
+    output = pred(cls_feats, reg_feats)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('====== Pred output ======= ')
+    pred_obj = output["pred_obj"]
+    pred_cls = output["pred_cls"]
+    pred_reg = output["pred_reg"]
+    pred_box = output["pred_box"]
+    anchors  = output["anchors"]
+    
+    for level in range(cfg.num_levels):
+        print("- Level-{} : objectness       -> {}".format(level, pred_obj[level].shape))
+        print("- Level-{} : classification   -> {}".format(level, pred_cls[level].shape))
+        print("- Level-{} : delta regression -> {}".format(level, pred_reg[level].shape))
+        print("- Level-{} : bbox regression  -> {}".format(level, pred_box[level].shape))
+        print("- Level-{} : anchor boxes     -> {}".format(level, anchors[level].shape))
+
+    flops, params = profile(pred, inputs=(cls_feats, reg_feats, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))
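For this anchor-based prediction layer, the tensor sizes in the printout can be sanity-checked by hand: a 640x640 input with strides 8/16/32 yields 80x80, 40x40, and 20x20 grids, and `cfg.anchor_size` attaches 3 anchor boxes per grid cell. Assuming the per-level outputs are flattened to `[B, M, ...]`, as the downstream loss code expects, the counts are:

```python
img_size = 640
strides  = [8, 16, 32]
anchors_per_cell = 3   # three anchor shapes per level in cfg.anchor_size

total = 0
for s in strides:
    cells = (img_size // s) ** 2
    total += cells * anchors_per_cell
    print("stride {:>2}: {:>5} cells x 3 anchors = {:>6}".format(s, cells, cells * 3))
print("total M =", total)   # 19200 + 4800 + 1200 = 25200 predictions
```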

+ 11 - 8
yolo/models/yolov5_af/yolov5_af_head.py

@@ -133,11 +133,13 @@ if __name__=='__main__':
     from thop import profile
     # Model config
     
-    # YOLOv3-Base config
-    class YoloxBaseConfig(object):
+    # YOLOv5-Base config
+    class Yolov5BaseConfig(object):
         def __init__(self) -> None:
             # ---------------- Model config ----------------
-            self.out_stride = 32
+            self.width    = 0.50
+            self.depth    = 0.34
+            self.out_stride = [8, 16, 32]
             self.max_stride = 32
             self.num_levels = 3
             ## Head
@@ -148,7 +150,7 @@ if __name__=='__main__':
             self.num_cls_head   = 2
             self.num_reg_head   = 2
 
-    cfg = YoloxBaseConfig()
+    cfg = Yolov5BaseConfig()
     # Build a head
     pyramid_feats = [torch.randn(1, cfg.head_dim, 80, 80),
                      torch.randn(1, cfg.head_dim, 40, 40),
@@ -161,11 +163,12 @@ if __name__=='__main__':
     cls_feats, reg_feats = head(pyramid_feats)
     t1 = time.time()
     print('Time: ', t1 - t0)
-    for cls_f, reg_f in zip(cls_feats, reg_feats):
-        print(cls_f.shape, reg_f.shape)
+    print("====== Yolov5 Head output ======")
+    for level, (cls_f, reg_f) in enumerate(zip(cls_feats, reg_feats)):
+        print("- Level-{} : ".format(level), cls_f.shape, reg_f.shape)
 
-    print('==============================')
     flops, params = profile(head, inputs=(pyramid_feats, ), verbose=False)
     print('==============================')
     print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))    
+    print('Params : {:.2f} M'.format(params / 1e6))
+    

+ 42 - 1
yolo/models/yolov5_af/yolov5_af_neck.py

@@ -1,7 +1,10 @@
 import torch
 import torch.nn as nn
 
-from .yolov5_af_basic import BasicConv
+try:
+    from .yolov5_af_basic import BasicConv
+except ImportError:
+    from yolov5_af_basic import BasicConv
 
 
 # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
@@ -31,3 +34,41 @@ class SPPF(nn.Module):
         y2 = self.m(y1)
 
         return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv5-Base config
+    class Yolov5BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.out_stride = 32
+            self.max_stride = 32
+            ## Neck
+            self.neck_act       = 'lrelu'
+            self.neck_norm      = 'BN'
+            self.neck_depthwise = False
+            self.neck_expand_ratio = 0.5
+            self.spp_pooling_size  = 5
+
+    cfg = Yolov5BaseConfig()
+    # Build a neck
+    in_dim  = 512
+    out_dim = 512
+    neck = SPPF(cfg, in_dim, out_dim)
+
+    # Inference
+    x = torch.randn(1, in_dim, 20, 20)
+    t0 = time.time()
+    output = neck(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('Neck output: ', output.shape)
+
+    flops, params = profile(neck, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))

+ 48 - 1
yolo/models/yolov5_af/yolov5_af_pafpn.py

@@ -3,7 +3,10 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
-from .yolov5_af_basic import BasicConv, CSPBlock
+try:
+    from .yolov5_af_basic import BasicConv, CSPBlock
+except ImportError:
+    from yolov5_af_basic import BasicConv, CSPBlock
 
 
 # Yolov5FPN
@@ -106,3 +109,47 @@ class Yolov5PaFPN(nn.Module):
             out_feats_proj.append(layer(feat))
             
         return out_feats_proj
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv5-Base config
+    class Yolov5BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 0.50
+            self.depth    = 0.34
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## FPN
+            self.fpn_act  = 'silu'
+            self.fpn_norm = 'BN'
+            self.fpn_depthwise = False
+            ## Head
+            self.head_dim = 256
+
+    cfg = Yolov5BaseConfig()
+    # Build a PaFPN
+    in_dims  = [128, 256, 512]
+    fpn = Yolov5PaFPN(cfg, in_dims)
+
+    # Inference
+    x = [torch.randn(1, in_dims[0], 80, 80),
+         torch.randn(1, in_dims[1], 40, 40),
+         torch.randn(1, in_dims[2], 20, 20)]
+    t0 = time.time()
+    output = fpn(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('====== FPN output ====== ')
+    for level, feat in enumerate(output):
+        print("- Level-{} : ".format(level), feat.shape)
+
+    flops, params = profile(fpn, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))

+ 53 - 0
yolo/models/yolov5_af/yolov5_af_pred.py

@@ -143,3 +143,56 @@ class Yolov5AFDetPredLayer(nn.Module):
                    }
 
         return outputs
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv5AF-Base config
+    class Yolov5AFBaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 1.0
+            self.depth    = 1.0
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## Head
+            self.head_dim  = 256
+
+    cfg = Yolov5AFBaseConfig()
+    cfg.num_classes = 20
+    # Build a pred layer
+    pred = Yolov5AFDetPredLayer(cfg)
+
+    # Inference
+    cls_feats = [torch.randn(1, cfg.head_dim, 80, 80),
+                 torch.randn(1, cfg.head_dim, 40, 40),
+                 torch.randn(1, cfg.head_dim, 20, 20),]
+    reg_feats = [torch.randn(1, cfg.head_dim, 80, 80),
+                 torch.randn(1, cfg.head_dim, 40, 40),
+                 torch.randn(1, cfg.head_dim, 20, 20),]
+    t0 = time.time()
+    output = pred(cls_feats, reg_feats)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('====== Pred output ======= ')
+    pred_obj = output["pred_obj"]
+    pred_cls = output["pred_cls"]
+    pred_reg = output["pred_reg"]
+    pred_box = output["pred_box"]
+    anchors  = output["anchors"]
+    
+    for level in range(cfg.num_levels):
+        print("- Level-{} : objectness       -> {}".format(level, pred_obj[level].shape))
+        print("- Level-{} : classification   -> {}".format(level, pred_cls[level].shape))
+        print("- Level-{} : delta regression -> {}".format(level, pred_reg[level].shape))
+        print("- Level-{} : bbox regression  -> {}".format(level, pred_box[level].shape))
+        print("- Level-{} : anchor boxes     -> {}".format(level, anchors[level].shape))
+
+    flops, params = profile(pred, inputs=(cls_feats, reg_feats, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))

+ 10 - 8
yolo/models/yolov7_af/yolov7_af_head.py

@@ -133,11 +133,12 @@ if __name__=='__main__':
     from thop import profile
     # Model config
     
-    # YOLOv3-Base config
-    class YoloxBaseConfig(object):
+    # YOLOv7-Base config
+    class Yolov7BaseConfig(object):
         def __init__(self) -> None:
             # ---------------- Model config ----------------
-            self.out_stride = 32
+            self.width    = 0.50
+            self.out_stride = [8, 16, 32]
             self.max_stride = 32
             self.num_levels = 3
             ## Head
@@ -148,7 +149,7 @@ if __name__=='__main__':
             self.num_cls_head   = 2
             self.num_reg_head   = 2
 
-    cfg = YoloxBaseConfig()
+    cfg = Yolov7BaseConfig()
     # Build a head
     pyramid_feats = [torch.randn(1, cfg.head_dim, 80, 80),
                      torch.randn(1, cfg.head_dim, 40, 40),
@@ -161,11 +162,12 @@ if __name__=='__main__':
     cls_feats, reg_feats = head(pyramid_feats)
     t1 = time.time()
     print('Time: ', t1 - t0)
-    for cls_f, reg_f in zip(cls_feats, reg_feats):
-        print(cls_f.shape, reg_f.shape)
+    print("====== Yolov7 Head output ======")
+    for level, (cls_f, reg_f) in enumerate(zip(cls_feats, reg_feats)):
+        print("- Level-{} : ".format(level), cls_f.shape, reg_f.shape)
 
-    print('==============================')
     flops, params = profile(head, inputs=(pyramid_feats, ), verbose=False)
     print('==============================')
     print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))    
+    print('Params : {:.2f} M'.format(params / 1e6))
+      

+ 40 - 2
yolo/models/yolov7_af/yolov7_af_neck.py

@@ -3,10 +3,10 @@ import torch.nn as nn
 from .yolov7_af_basic import BasicConv
 
 
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
+# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv7-AF, originally from YOLOv5 by Glenn Jocher
 class SPPF(nn.Module):
     """
-        This code referenced to https://github.com/ultralytics/yolov5
+        This code is adapted from https://github.com/ultralytics/yolov5
     """
     def __init__(self, cfg, in_dim, out_dim, expansion=0.5):
         super().__init__()
@@ -58,3 +58,41 @@ class SPPFBlockCSP(nn.Module):
         y = self.cv3(torch.cat([x1, x2], dim=1))
 
         return y
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv7-AF-Base config
+    class Yolov7AFBaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.out_stride = 32
+            self.max_stride = 32
+            ## Neck
+            self.neck_act       = 'lrelu'
+            self.neck_norm      = 'BN'
+            self.neck_depthwise = False
+            self.neck_expand_ratio = 0.5
+            self.spp_pooling_size  = 5
+
+    cfg = Yolov7AFBaseConfig()
+    # Build a neck
+    in_dim  = 512
+    out_dim = 512
+    neck = SPPF(cfg, in_dim, out_dim)
+
+    # Inference
+    x = torch.randn(1, in_dim, 20, 20)
+    t0 = time.time()
+    output = neck(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('Neck output: ', output.shape)
+
+    flops, params = profile(neck, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))

+ 49 - 2
yolo/models/yolov7_af/yolov7_af_pafpn.py

@@ -3,10 +3,13 @@ import torch
 import torch.nn as nn
 import torch.nn.functional as F
 
-from .yolov7_af_basic import BasicConv, ELANLayerFPN, MDown
+try:
+    from .yolov7_af_basic import BasicConv, ELANLayerFPN, MDown
+except ImportError:
+    from yolov7_af_basic import BasicConv, ELANLayerFPN, MDown
 
 
-# PaFPN-ELAN (YOLOv7's)
+# YOLOv7-AF PaFPN
 class Yolov7PaFPN(nn.Module):
     def __init__(self, cfg, in_dims: List = [512, 1024, 512]):
         super(Yolov7PaFPN, self).__init__()
@@ -112,3 +115,47 @@ class Yolov7PaFPN(nn.Module):
         out_feats = [self.head_conv_1(p3), self.head_conv_2(p4), self.head_conv_3(p5)]
             
         return out_feats
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv7-Base config
+    class Yolov7BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 0.50
+            self.depth    = 0.34
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## FPN
+            self.fpn_act  = 'silu'
+            self.fpn_norm = 'BN'
+            self.fpn_depthwise = False
+            ## Head
+            self.head_dim = 256
+
+    cfg = Yolov7BaseConfig()
+    # Build a PaFPN
+    in_dims  = [128, 256, 512]
+    fpn = Yolov7PaFPN(cfg, in_dims)
+
+    # Inference
+    x = [torch.randn(1, in_dims[0], 80, 80),
+         torch.randn(1, in_dims[1], 40, 40),
+         torch.randn(1, in_dims[2], 20, 20)]
+    t0 = time.time()
+    output = fpn(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('====== FPN output ====== ')
+    for level, feat in enumerate(output):
+        print("- Level-{} : ".format(level), feat.shape)
+
+    flops, params = profile(fpn, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))

+ 53 - 0
yolo/models/yolov7_af/yolov7_af_pred.py

@@ -143,3 +143,56 @@ class Yolov7AFDetPredLayer(nn.Module):
                    }
 
         return outputs
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv7AF-Base config
+    class Yolov7AFBaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 1.0
+            self.depth    = 1.0
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## Head
+            self.head_dim  = 256
+
+    cfg = Yolov7AFBaseConfig()
+    cfg.num_classes = 20
+    # Build a pred layer
+    pred = Yolov7AFDetPredLayer(cfg)
+
+    # Inference
+    cls_feats = [torch.randn(1, cfg.head_dim, 80, 80),
+                 torch.randn(1, cfg.head_dim, 40, 40),
+                 torch.randn(1, cfg.head_dim, 20, 20),]
+    reg_feats = [torch.randn(1, cfg.head_dim, 80, 80),
+                 torch.randn(1, cfg.head_dim, 40, 40),
+                 torch.randn(1, cfg.head_dim, 20, 20),]
+    t0 = time.time()
+    output = pred(cls_feats, reg_feats)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('====== Pred output ======= ')
+    pred_obj = output["pred_obj"]
+    pred_cls = output["pred_cls"]
+    pred_reg = output["pred_reg"]
+    pred_box = output["pred_box"]
+    anchors  = output["anchors"]
+    
+    for level in range(cfg.num_levels):
+        print("- Level-{} : objectness       -> {}".format(level, pred_obj[level].shape))
+        print("- Level-{} : classification   -> {}".format(level, pred_cls[level].shape))
+        print("- Level-{} : delta regression -> {}".format(level, pred_reg[level].shape))
+        print("- Level-{} : bbox regression  -> {}".format(level, pred_box[level].shape))
+        print("- Level-{} : anchor boxes     -> {}".format(level, anchors[level].shape))
+
+    flops, params = profile(pred, inputs=(cls_feats, reg_feats, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))

+ 54 - 1
yolo/models/yolov8/yolov8_head.py

@@ -1,7 +1,10 @@
 import torch
 import torch.nn as nn
 
-from .yolov8_basic import BasicConv
+try:
+    from .yolov8_basic import BasicConv
+except ImportError:
+    from yolov8_basic import BasicConv
 
 
 # -------------------- Detection Head --------------------
@@ -124,3 +127,53 @@ class Yolov8DetHead(nn.Module):
             reg_feats.append(reg_feat)
 
         return cls_feats, reg_feats
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv8-Base config
+    class Yolov8BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 0.50
+            self.depth    = 0.34
+            self.ratio    = 2.0
+            self.reg_max  = 16
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## Head
+            self.head_act  = 'lrelu'
+            self.head_norm = 'BN'
+            self.head_depthwise = False
+            self.num_cls_head   = 2
+            self.num_reg_head   = 2
+
+    cfg = Yolov8BaseConfig()
+    cfg.num_classes = 20
+
+    # Build a head
+    fpn_dims = [128, 256, 512]
+    pyramid_feats = [torch.randn(1, fpn_dims[0], 80, 80),
+                     torch.randn(1, fpn_dims[1], 40, 40),
+                     torch.randn(1, fpn_dims[2], 20, 20)]
+    head = Yolov8DetHead(cfg, fpn_dims)
+
+
+    # Inference
+    t0 = time.time()
+    cls_feats, reg_feats = head(pyramid_feats)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print("====== Yolov8 Head output ======")
+    for level, (cls_f, reg_f) in enumerate(zip(cls_feats, reg_feats)):
+        print("- Level-{} : ".format(level), cls_f.shape, reg_f.shape)
+
+    flops, params = profile(head, inputs=(pyramid_feats, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))
+    

+ 43 - 2
yolo/models/yolov8/yolov8_neck.py

@@ -1,8 +1,11 @@
 import torch
 import torch.nn as nn
 
-from .yolov8_basic import BasicConv
-
+try:
+    from .yolov8_basic import BasicConv
+except ImportError:
+    from yolov8_basic import BasicConv
 
 # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
 class SPPF(nn.Module):
@@ -31,3 +34,41 @@ class SPPF(nn.Module):
         y2 = self.m(y1)
 
         return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv8-Base config
+    class Yolov8BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.out_stride = 32
+            self.max_stride = 32
+            ## Neck
+            self.neck_act       = 'lrelu'
+            self.neck_norm      = 'BN'
+            self.neck_depthwise = False
+            self.neck_expand_ratio = 0.5
+            self.spp_pooling_size  = 5
+
+    cfg = Yolov8BaseConfig()
+    # Build a neck
+    in_dim  = 512
+    out_dim = 512
+    neck = SPPF(cfg, in_dim, out_dim)
+
+    # Inference
+    x = torch.randn(1, in_dim, 20, 20)
+    t0 = time.time()
+    output = neck(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('Neck output: ', output.shape)
+
+    flops, params = profile(neck, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))

+ 50 - 2
yolo/models/yolov8/yolov8_pafpn.py

@@ -3,10 +3,13 @@ import torch.nn as nn
 import torch.nn.functional as F
 from typing import List
 
-from .yolov8_basic import BasicConv, ELANLayer
+try:
+    from .yolov8_basic import BasicConv, ELANLayer
+except ImportError:
+    from yolov8_basic import BasicConv, ELANLayer
 
 
-# Modified YOLOv8's PaFPN
+# YOLOv8's PaFPN
 class Yolov8PaFPN(nn.Module):
     def __init__(self,
                  cfg,
@@ -102,3 +105,48 @@ class Yolov8PaFPN(nn.Module):
         out_feats = [p3, p4, p5] # [P3, P4, P5]
                 
         return out_feats
+    
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv8-Base config
+    class Yolov8BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 0.50
+            self.depth    = 0.34
+            self.ratio    = 2.0
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## FPN
+            self.fpn_act  = 'silu'
+            self.fpn_norm = 'BN'
+            self.fpn_depthwise = False
+            ## Head
+            self.head_dim = 256
+
+    cfg = Yolov8BaseConfig()
+    # Build a PaFPN
+    in_dims  = [128, 256, 512]
+    fpn = Yolov8PaFPN(cfg, in_dims)
+
+    # Inference
+    x = [torch.randn(1, in_dims[0], 80, 80),
+         torch.randn(1, in_dims[1], 40, 40),
+         torch.randn(1, in_dims[2], 20, 20)]
+    t0 = time.time()
+    output = fpn(x)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('====== FPN output ====== ')
+    for level, feat in enumerate(output):
+        print("- Level-{} : ".format(level), feat.shape)
+
+    flops, params = profile(fpn, inputs=(x, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))

+ 54 - 0
yolo/models/yolov8/yolov8_pred.py

@@ -154,3 +154,57 @@ class Yolov8DetPredLayer(nn.Module):
                    }
 
         return outputs
+
+
+if __name__=='__main__':
+    import time
+    from thop import profile
+    # Model config
+    
+    # YOLOv8-Base config
+    class Yolov8BaseConfig(object):
+        def __init__(self) -> None:
+            # ---------------- Model config ----------------
+            self.width    = 1.0
+            self.depth    = 1.0
+            self.ratio    = 1.0
+            self.reg_max  = 16
+            self.out_stride = [8, 16, 32]
+            self.max_stride = 32
+            self.num_levels = 3
+            ## Head
+
+    cfg = Yolov8BaseConfig()
+    cfg.num_classes = 20
+    cls_dim = 128
+    reg_dim = 64
+    # Build a pred layer
+    pred = Yolov8DetPredLayer(cfg, cls_dim, reg_dim)
+
+    # Inference
+    cls_feats = [torch.randn(1, cls_dim, 80, 80),
+                 torch.randn(1, cls_dim, 40, 40),
+                 torch.randn(1, cls_dim, 20, 20),]
+    reg_feats = [torch.randn(1, reg_dim, 80, 80),
+                 torch.randn(1, reg_dim, 40, 40),
+                 torch.randn(1, reg_dim, 20, 20),]
+    t0 = time.time()
+    output = pred(cls_feats, reg_feats)
+    t1 = time.time()
+    print('Time: ', t1 - t0)
+    print('====== Pred output ======= ')
+    pred_cls = output["pred_cls"]
+    pred_reg = output["pred_reg"]
+    pred_box = output["pred_box"]
+    anchors  = output["anchors"]
+    
+    for level in range(cfg.num_levels):
+        print("- Level-{} : classification   -> {}".format(level, pred_cls[level].shape))
+        print("- Level-{} : delta regression -> {}".format(level, pred_reg[level].shape))
+        print("- Level-{} : bbox regression  -> {}".format(level, pred_box[level].shape))
+        print("- Level-{} : anchor boxes     -> {}".format(level, anchors[level].shape))
+
+    flops, params = profile(pred, inputs=(cls_feats, reg_feats, ), verbose=False)
+    print('==============================')
+    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
+    print('Params : {:.2f} M'.format(params / 1e6))
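In this YOLOv8 pred-layer test, `reg_dim = 64` is not arbitrary: with `cfg.reg_max = 16`, a DFL-style head predicts a 16-bin distribution per box side, so the regression channels come to `4 * reg_max = 64` (this pairing is inferred from the config shown; the actual decoding lives inside `Yolov8DetPredLayer`). The standard distribution-to-distance decode, sketched:

```python
import torch

reg_max = 16
reg_logits = torch.randn(4, reg_max)   # hypothetical raw output for one location

# DFL decode: softmax over the bins, then the expectation over bin indices
proj  = torch.arange(reg_max, dtype=torch.float32)   # [0, 1, ..., 15]
dists = reg_logits.softmax(dim=-1) @ proj            # [4] distances (l, t, r, b)
print(dists)   # distances in grid units, scaled by the level stride downstream
```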

+ 0 - 61
yolo/models/yolox2/README.md

@@ -1,61 +0,0 @@
-# Anchor-free YOLOv5:
-
-- VOC
-
-|     Model   | Batch | Scale | AP<sup>val<br>0.5 | Weight |  Logs  |
-|-------------|-------|-------|-------------------|--------|--------|
-| YOLOv5-AF-S | 1xb16 |  640  |       82.4        | [ckpt](https://github.com/yjh0410/YOLO-Tutorial-v5/releases/download/yolo_tutorial_ckpt/yolov5_af_s_voc.pth) | [log](https://github.com/yjh0410/YOLO-Tutorial-v5/releases/download/yolo_tutorial_ckpt/YOLOv5-AF-S-VOC.txt) |
-
-- COCO
-
-|    Model    | Batch | Scale | AP<sup>val<br>0.5:0.95 | AP<sup>val<br>0.5 | FLOPs<br><sup>(G) | Params<br><sup>(M) | Weight |  Logs  |
-|-------------|-------|-------|------------------------|-------------------|-------------------|--------------------|--------|--------|
-| YOLOv5-AF-S | 1xb16 |  640  |                    |               |   26.9            |   8.9             |  |  |
-
-- For training, we train redesigned YOLOv5-AF with 300 epochs on COCO. We also use the gradient accumulation.
-- For data augmentation, we use the RandomAffine, RandomHSV, Mosaic and YOLOX's Mixup augmentation.
-- For optimizer, we use AdamW with weight decay of 0.05 and per image base lr of 0.001 / 64.
-- For learning rate scheduler, we use cosine decay scheduler.
-- For batch size, we set it to 16, and we also use the gradient accumulation to approximate batch size of 256.
-
-
-## Train YOLOv5-AF
-### Single GPU
-Taking training YOLOv5-AF-S on COCO as the example,
-```Shell
-python train.py --cuda -d coco --root path/to/coco -m yolov5_af_s -bs 16 --fp16 
-```
-
-### Multi GPU
-Taking training YOLOv5-AF-S on COCO as the example,
-```Shell
-python -m torch.distributed.run --nproc_per_node=8 train.py --cuda --distributed -d coco --root path/to/coco -m yolov5_af_s -bs 16 --fp16 
-```
-
-## Test YOLOv5-AF
-Taking testing YOLOv5-AF-S on COCO-val as the example,
-```Shell
-python test.py --cuda -d coco --root path/to/coco -m yolov5_af_s --weight path/to/yolov5.pth --show 
-```
-
-## Evaluate YOLOv5-AF
-Taking evaluating YOLOv5-AF-S on COCO-val as the example,
-```Shell
-python eval.py --cuda -d coco --root path/to/coco -m yolov5_af_s --weight path/to/yolov5.pth 
-```
-
-## Demo
-### Detect with Image
-```Shell
-python demo.py --mode image --path_to_img path/to/image_dirs/ --cuda -m yolov5_af_s --weight path/to/weight --show
-```
-
-### Detect with Video
-```Shell
-python demo.py --mode video --path_to_vid path/to/video --cuda -m yolov5_af_s --weight path/to/weight --show --gif
-```
-
-### Detect with Camera
-```Shell
-python demo.py --mode camera --cuda -m yolov5_af_s --weight path/to/weight --show --gif
-```

+ 0 - 24
yolo/models/yolox2/build.py

@@ -1,24 +0,0 @@
-import torch.nn as nn
-
-from .loss import SetCriterion
-from .yolox2 import Yolox2
-
-
-# build object detector
-def build_yolox2(cfg, is_val=False):
-    # -------------- Build YOLO --------------
-    model = Yolox2(cfg, is_val)
-
-    # -------------- Initialize YOLO --------------
-    for m in model.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03    
-            
-    # -------------- Build criterion --------------
-    criterion = None
-    if is_val:
-        # build criterion for training
-        criterion = SetCriterion(cfg)
-        
-    return model, criterion
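One detail of the removed builder worth keeping in mind for the surviving models: it overrides every `nn.BatchNorm2d` to `eps=1e-3, momentum=0.03`, the values used across the ultralytics YOLO family, instead of PyTorch's defaults (`eps=1e-5`, `momentum=0.1`). The loop in isolation:

```python
import torch.nn as nn

def tune_batchnorm(model: nn.Module) -> None:
    # YOLO-style BN settings: a larger eps for numerical stability (helpful
    # under fp16) and a smaller momentum, i.e. slower running-stat updates.
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.eps = 1e-3
            m.momentum = 0.03
```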

+ 0 - 128
yolo/models/yolox2/loss.py

@@ -1,128 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from utils.box_ops import get_ious, bbox2dist
-from utils.distributed_utils import get_world_size, is_dist_avail_and_initialized
-
-from .matcher import AlignedSimOTA
-
-
-class SetCriterion(object):
-    def __init__(self, cfg):
-        self.cfg = cfg
-        self.num_classes = cfg.num_classes
-        # --------------- Loss config ---------------
-        self.loss_cls_weight = cfg.loss_cls
-        self.loss_box_weight = cfg.loss_box
-        # --------------- Matcher config ---------------
-        self.matcher = AlignedSimOTA(soft_center_radius = cfg.ota_soft_center_radius,
-                                     topk_candidates    = cfg.ota_topk_candidates,
-                                     num_classes        = cfg.num_classes,
-                                     )
-
-    def loss_classes(self, pred_cls, target, beta=2.0):
-        # Quality FocalLoss
-        """
-            pred_cls: (torch.Tensor): [N, C]。
-            target:   (tuple([torch.Tensor], [torch.Tensor])): label -> (N,), score -> (N)
-        """
-        label, score = target
-        pred_sigmoid = pred_cls.sigmoid()
-        scale_factor = pred_sigmoid
-        zerolabel = scale_factor.new_zeros(pred_cls.shape)
-
-        ce_loss = F.binary_cross_entropy_with_logits(
-            pred_cls, zerolabel, reduction='none') * scale_factor.pow(beta)
-        
-        bg_class_ind = pred_cls.shape[-1]
-        pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
-        if pos.shape[0] > 0:
-            pos_label = label[pos].long()
-
-            scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
-
-            ce_loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
-                pred_cls[pos, pos_label], score[pos],
-                reduction='none') * scale_factor.abs().pow(beta)
-
-        return ce_loss
-    
-    def loss_bboxes(self, pred_box, gt_box):
-        ious = get_ious(pred_box, gt_box, box_mode="xyxy", iou_type='giou')
-        loss_box = 1.0 - ious
-
-        return loss_box
-
-    def __call__(self, outputs, targets):        
-        """
-            outputs['pred_cls']: List(Tensor) [B, M, C]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['pred_box']: List(Tensor) [B, M, 4]
-            outputs['strides']: List(Int) [8, 16, 32] output stride
-            targets: (List) [dict{'boxes': [...], 
-                                 'labels': [...], 
-                                 'orig_size': ...}, ...]
-        """
-        bs          = outputs['pred_cls'][0].shape[0]
-        device      = outputs['pred_cls'][0].device
-        fpn_strides = outputs['strides']
-        anchors     = outputs['anchors']
-        # preds: [B, M, C]
-        cls_preds = torch.cat(outputs['pred_cls'], dim=1)
-        box_preds = torch.cat(outputs['pred_box'], dim=1)
-        
-        # --------------- label assignment ---------------
-        cls_targets = []
-        box_targets = []
-        assign_metrics = []
-        for batch_idx in range(bs):
-            tgt_labels = targets[batch_idx]["labels"].to(device)  # [N,]
-            tgt_bboxes = targets[batch_idx]["boxes"].to(device)   # [N, 4]
-            assigned_result = self.matcher(fpn_strides=fpn_strides,
-                                           anchors=anchors,
-                                           pred_cls=cls_preds[batch_idx].detach(),
-                                           pred_box=box_preds[batch_idx].detach(),
-                                           gt_labels=tgt_labels,
-                                           gt_bboxes=tgt_bboxes
-                                           )
-            cls_targets.append(assigned_result['assigned_labels'])
-            box_targets.append(assigned_result['assigned_bboxes'])
-            assign_metrics.append(assigned_result['assign_metrics'])
-
-        # List[B, M, C] -> Tensor[BM, C]
-        cls_targets = torch.cat(cls_targets, dim=0)
-        box_targets = torch.cat(box_targets, dim=0)
-        assign_metrics = torch.cat(assign_metrics, dim=0)
-
-        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
-        bg_class_ind = self.num_classes
-        pos_inds = ((cls_targets >= 0) & (cls_targets < bg_class_ind)).nonzero().squeeze(1)
-        num_fgs = assign_metrics.sum()
-
-        if is_dist_avail_and_initialized():
-            torch.distributed.all_reduce(num_fgs)
-        num_fgs = (num_fgs / get_world_size()).clamp(1.0).item()
-
-        # ------------------ Classification loss ------------------
-        cls_preds = cls_preds.view(-1, self.num_classes)
-        loss_cls = self.loss_classes(cls_preds, (cls_targets, assign_metrics))
-        loss_cls = loss_cls.sum() / num_fgs
-
-        # ------------------ Regression loss ------------------
-        box_preds_pos = box_preds.view(-1, 4)[pos_inds]
-        box_targets_pos = box_targets[pos_inds]
-        loss_box = self.loss_bboxes(box_preds_pos, box_targets_pos)
-        loss_box = loss_box.sum() / num_fgs
-
-        # total loss
-        losses = self.loss_cls_weight * loss_cls + \
-                 self.loss_box_weight * loss_box
-        loss_dict = dict(
-                loss_cls = loss_cls,
-                loss_box = loss_box,
-                losses = losses
-        )
-
-        return loss_dict
-    
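The removed `loss_classes` is a Quality Focal Loss: every prediction starts as a negative, `BCE(sigma, 0)` weighted by `sigma**beta`, and positive (anchor, class) pairs are overwritten with `BCE(sigma, y)` weighted by `|y - sigma|**beta`, where `y` is the assigned IoU-quality score. The same formula as a standalone function, mirroring the deleted code:

```python
import torch
import torch.nn.functional as F

def quality_focal_loss(pred_logits, labels, scores, num_classes, beta=2.0):
    """pred_logits: [N, C] logits; labels: [N] class ids (== num_classes for
    background); scores: [N] assigned quality targets in [0, 1]."""
    sigma = pred_logits.sigmoid()
    # Start with the negative branch everywhere: target 0, weight sigma^beta
    loss = F.binary_cross_entropy_with_logits(
        pred_logits, torch.zeros_like(pred_logits), reduction='none'
    ) * sigma.pow(beta)

    pos = ((labels >= 0) & (labels < num_classes)).nonzero().squeeze(1)
    if pos.shape[0] > 0:
        cls = labels[pos].long()
        weight = (scores[pos] - sigma[pos, cls]).abs().pow(beta)
        loss[pos, cls] = F.binary_cross_entropy_with_logits(
            pred_logits[pos, cls], scores[pos], reduction='none'
        ) * weight
    return loss
```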

+ 0 - 160
yolo/models/yolox2/matcher.py

@@ -1,160 +0,0 @@
-# ------------------------------------------------------------------------------------------
-# This code referenced to https://github.com/open-mmlab/mmyolo/models/task_modules/assigners/batch_dsl_assigner.py
-# ------------------------------------------------------------------------------------------
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from utils.box_ops import box_iou
-
-
-# -------------------------- Aligned SimOTA assigner --------------------------
-class AlignedSimOTA(object):
-    def __init__(self, num_classes, soft_center_radius=3.0, topk_candidates=13):
-        self.num_classes = num_classes
-        self.soft_center_radius = soft_center_radius
-        self.topk_candidates = topk_candidates
-
-    @torch.no_grad()
-    def __call__(self, 
-                 fpn_strides, 
-                 anchors, 
-                 pred_cls, 
-                 pred_box, 
-                 gt_labels,
-                 gt_bboxes):
-        # [M,]
-        strides = torch.cat([torch.ones_like(anchor_i[:, 0]) * stride_i
-                                for stride_i, anchor_i in zip(fpn_strides, anchors)], dim=-1)
-        # List[F, M, 2] -> [M, 2]
-        num_gt = len(gt_labels)
-        anchors = torch.cat(anchors, dim=0)
-
-        # check gt
-        if num_gt == 0 or gt_bboxes.max().item() == 0.:
-            return {
-                'assigned_labels': gt_labels.new_full(pred_cls[..., 0].shape, self.num_classes).long(),
-                'assigned_bboxes': gt_bboxes.new_full(pred_box.shape, 0).float(),
-                'assign_metrics':  gt_bboxes.new_full(pred_cls[..., 0].shape, 0).float(),
-            }
-        
-        # get inside points: [N, M]
-        is_in_gt = self.find_inside_points(gt_bboxes, anchors)
-        valid_mask = is_in_gt.sum(dim=0) > 0  # [M,]
-
-        # ----------------------------------- soft center prior -----------------------------------
-        gt_center = (gt_bboxes[..., :2] + gt_bboxes[..., 2:]) / 2.0
-        distance = (anchors.unsqueeze(0) - gt_center.unsqueeze(1)
-                    ).pow(2).sum(-1).sqrt() / strides.unsqueeze(0)  # [N, M]
-        distance = distance * valid_mask.unsqueeze(0)
-        soft_center_prior = torch.pow(10, distance - self.soft_center_radius)
-
-        # ----------------------------------- regression cost -----------------------------------
-        pair_wise_ious, _ = box_iou(gt_bboxes, pred_box)  # [N, M]
-        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8) * 3.0
-
-        # ----------------------------------- classification cost -----------------------------------
-        ## select the predicted scores corresponded to the gt_labels
-        pairwise_pred_scores = pred_cls.permute(1, 0)  # [M, C] -> [C, M]
-        pairwise_pred_scores = pairwise_pred_scores[gt_labels.long(), :].float()   # [N, M]
-        ## scale factor
-        scale_factor = (pair_wise_ious - pairwise_pred_scores.sigmoid()).abs().pow(2.0)
-        ## cls cost
-        pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
-            pairwise_pred_scores, pair_wise_ious,
-            reduction="none") * scale_factor # [N, M]
-            
-        del pairwise_pred_scores
-
-        ## foreground cost matrix
-        cost_matrix = pair_wise_cls_loss + pair_wise_ious_loss + soft_center_prior
-        max_pad_value = torch.ones_like(cost_matrix) * 1e9
-        cost_matrix = torch.where(valid_mask[None].repeat(num_gt, 1),   # [N, M]
-                                  cost_matrix, max_pad_value)
-
-        # ----------------------------------- dynamic label assignment -----------------------------------
-        matched_pred_ious, matched_gt_inds, fg_mask_inboxes = self.dynamic_k_matching(
-            cost_matrix, pair_wise_ious, num_gt)
-        del pair_wise_cls_loss, cost_matrix, pair_wise_ious, pair_wise_ious_loss
-
-        # -----------------------------------process assigned labels -----------------------------------
-        assigned_labels = gt_labels.new_full(pred_cls[..., 0].shape,
-                                             self.num_classes)  # [M,]
-        assigned_labels[fg_mask_inboxes] = gt_labels[matched_gt_inds].squeeze(-1)
-        assigned_labels = assigned_labels.long()  # [M,]
-
-        assigned_bboxes = gt_bboxes.new_full(pred_box.shape, 0)        # [M, 4]
-        assigned_bboxes[fg_mask_inboxes] = gt_bboxes[matched_gt_inds]  # [M, 4]
-
-        assign_metrics = gt_bboxes.new_full(pred_cls[..., 0].shape, 0) # [M, 4]
-        assign_metrics[fg_mask_inboxes] = matched_pred_ious            # [M, 4]
-
-        assigned_dict = dict(
-            assigned_labels=assigned_labels,
-            assigned_bboxes=assigned_bboxes,
-            assign_metrics=assign_metrics
-            )
-        
-        return assigned_dict
-
-    def find_inside_points(self, gt_bboxes, anchors):
-        """
-            gt_bboxes: Tensor -> [N, 2]
-            anchors:   Tensor -> [M, 2]
-        """
-        num_anchors = anchors.shape[0]
-        num_gt = gt_bboxes.shape[0]
-
-        anchors_expand = anchors.unsqueeze(0).repeat(num_gt, 1, 1)           # [N, M, 2]
-        gt_bboxes_expand = gt_bboxes.unsqueeze(1).repeat(1, num_anchors, 1)  # [N, M, 4]
-
-        # offset
-        lt = anchors_expand - gt_bboxes_expand[..., :2]
-        rb = gt_bboxes_expand[..., 2:] - anchors_expand
-        bbox_deltas = torch.cat([lt, rb], dim=-1)
-
-        is_in_gts = bbox_deltas.min(dim=-1).values > 0
-
-        return is_in_gts
-    
-    def dynamic_k_matching(self, cost_matrix, pairwise_ious, num_gt):
-        """Use IoU and matching cost to calculate the dynamic top-k positive
-        targets.
-
-        Args:
-            cost_matrix (Tensor): Cost matrix.
-            pairwise_ious (Tensor): Pairwise iou matrix.
-            num_gt (int): Number of gt.
-            valid_mask (Tensor): Mask for valid bboxes.
-        Returns:
-            tuple: matched ious and gt indexes.
-        """
-        matching_matrix = torch.zeros_like(cost_matrix, dtype=torch.uint8)
-        # select candidate topk ious for dynamic-k calculation
-        candidate_topk = min(self.topk_candidates, pairwise_ious.size(1))
-        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=1)
-        # calculate dynamic k for each gt
-        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
-
-        # sorting the batch cost matirx is faster than topk
-        _, sorted_indices = torch.sort(cost_matrix, dim=1)
-        for gt_idx in range(num_gt):
-            topk_ids = sorted_indices[gt_idx, :dynamic_ks[gt_idx]]
-            matching_matrix[gt_idx, :][topk_ids] = 1
-
-        del topk_ious, dynamic_ks, topk_ids
-
-        prior_match_gt_mask = matching_matrix.sum(0) > 1
-        if prior_match_gt_mask.sum() > 0:
-            cost_min, cost_argmin = torch.min(
-                cost_matrix[:, prior_match_gt_mask], dim=0)
-            matching_matrix[:, prior_match_gt_mask] *= 0
-            matching_matrix[cost_argmin, prior_match_gt_mask] = 1
-
-        # get foreground mask inside box and center prior
-        fg_mask_inboxes = matching_matrix.sum(0) > 0
-        matched_pred_ious = (matching_matrix *
-                             pairwise_ious).sum(0)[fg_mask_inboxes]
-        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
-
-        return matched_pred_ious, matched_gt_inds, fg_mask_inboxes
-    
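The heart of the removed `AlignedSimOTA` is the dynamic-k rule: each ground-truth box gets `k` positive anchors, where `k` is the truncated sum of its `topk_candidates` best IoUs against the predictions, clamped to at least 1, so well-localized objects claim more positives. The rule in isolation, with a worked example:

```python
import torch

def dynamic_ks(pairwise_ious: torch.Tensor, topk_candidates: int = 13) -> torch.Tensor:
    """pairwise_ious: [num_gt, num_preds] IoU matrix -> per-gt positive count."""
    k = min(topk_candidates, pairwise_ious.size(1))
    topk_ious, _ = torch.topk(pairwise_ious, k, dim=1)
    # Truncated sum of the best IoUs; every gt keeps at least one positive
    return torch.clamp(topk_ious.sum(1).int(), min=1)

ious = torch.tensor([[0.9, 0.8, 0.7, 0.1],    # well matched: 0.9+0.8+0.7 = 2.4 -> k=2
                     [0.2, 0.1, 0.0, 0.0]])   # poorly matched: 0.3 -> int 0 -> clamp 1
print(dynamic_ks(ious, topk_candidates=3))    # tensor([2, 1], dtype=torch.int32)
```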

+ 0 - 152
yolo/models/yolox2/yolox2.py

@@ -1,152 +0,0 @@
-# --------------- Torch components ---------------
-import torch
-import torch.nn as nn
-
-# --------------- Model components ---------------
-from .yolox2_backbone import Yolov5Backbone
-from .yolox2_neck     import SPPF
-from .yolox2_pafpn    import Yolov5PaFPN
-from .yolox2_head     import Yolov5DetHead
-from .yolox2_pred     import Yolov5AFDetPredLayer
-
-# --------------- External components ---------------
-from utils.misc import multiclass_nms
-
-
-# Yolov5AF
-class Yolox2(nn.Module):
-    def __init__(self,
-                 cfg,
-                 is_val = False,
-                 ) -> None:
-        super(Yolox2, self).__init__()
-        # ---------------------- Basic setting ----------------------
-        self.cfg = cfg
-        self.num_classes = cfg.num_classes
-        ## Post-process parameters
-        self.topk_candidates  = cfg.val_topk        if is_val else cfg.test_topk
-        self.conf_thresh      = cfg.val_conf_thresh if is_val else cfg.test_conf_thresh
-        self.nms_thresh       = cfg.val_nms_thresh  if is_val else cfg.test_nms_thresh
-        self.no_multi_labels  = False if is_val else True
-        
-        # ---------------------- Network Parameters ----------------------
-        ## Backbone
-        self.backbone = Yolov5Backbone(cfg)
-        self.pyramid_feat_dims = self.backbone.feat_dims[-3:]
-        ## Neck: SPP
-        self.neck     = SPPF(cfg, self.pyramid_feat_dims[-1], self.pyramid_feat_dims[-1])
-        self.pyramid_feat_dims[-1] = self.neck.out_dim
-        ## Neck: FPN
-        self.fpn      = Yolov5PaFPN(cfg, self.pyramid_feat_dims)
-        ## Head
-        self.head     = Yolov5DetHead(cfg, self.fpn.out_dims)
-        ## Pred
-        self.pred     = Yolov5AFDetPredLayer(cfg)
-
-    def post_process(self, cls_preds, box_preds):
-        """
-        We process predictions at each scale hierarchically
-        Input:
-            cls_preds: List[torch.Tensor] -> [[B, M, C], ...], B=1
-            box_preds: List[torch.Tensor] -> [[B, M, 4], ...], B=1
-        Output:
-            bboxes: np.array -> [N, 4]
-            scores: np.array -> [N,]
-            labels: np.array -> [N,]
-        """
-        all_scores = []
-        all_labels = []
-        all_bboxes = []
-        
-        for cls_pred_i, box_pred_i in zip(cls_preds, box_preds):
-            cls_pred_i = cls_pred_i[0]
-            box_pred_i = box_pred_i[0]
-            if self.no_multi_labels:
-                # [M,]
-                scores, labels = torch.max(cls_pred_i.sigmoid(), dim=1)
-
-                # Keep top k top scoring indices only.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # topk candidates
-                predicted_prob, topk_idxs = scores.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                labels = labels[topk_idxs]
-                bboxes = box_pred_i[topk_idxs]
-            else:
-                # [M, C] -> [MC,]
-                scores_i = cls_pred_i.sigmoid().flatten()
-
-                # Keep only the top-k highest-scoring indices.
-                num_topk = min(self.topk_candidates, box_pred_i.size(0))
-
-                # torch.sort is actually faster than .topk (at least on GPUs)
-                predicted_prob, topk_idxs = scores_i.sort(descending=True)
-                topk_scores = predicted_prob[:num_topk]
-                topk_idxs = topk_idxs[:num_topk]
-
-                # filter out the proposals with low confidence score
-                keep_idxs = topk_scores > self.conf_thresh
-                scores = topk_scores[keep_idxs]
-                topk_idxs = topk_idxs[keep_idxs]
-
-                anchor_idxs = torch.div(topk_idxs, self.num_classes, rounding_mode='floor')
-                labels = topk_idxs % self.num_classes
-
-                bboxes = box_pred_i[anchor_idxs]
-
-            all_scores.append(scores)
-            all_labels.append(labels)
-            all_bboxes.append(bboxes)
-
-        scores = torch.cat(all_scores, dim=0)
-        labels = torch.cat(all_labels, dim=0)
-        bboxes = torch.cat(all_bboxes, dim=0)
-
-        # to cpu & numpy
-        scores = scores.cpu().numpy()
-        labels = labels.cpu().numpy()
-        bboxes = bboxes.cpu().numpy()
-
-        # nms
-        scores, labels, bboxes = multiclass_nms(
-            scores, labels, bboxes, self.nms_thresh, self.num_classes)
-        
-        return bboxes, scores, labels
-    
-    def forward(self, x):
-        # ---------------- Backbone ----------------
-        pyramid_feats = self.backbone(x)
-        # ---------------- Neck: SPP ----------------
-        pyramid_feats[-1] = self.neck(pyramid_feats[-1])
-
-        # ---------------- Neck: PaFPN ----------------
-        pyramid_feats = self.fpn(pyramid_feats)
-
-        # ---------------- Heads ----------------
-        cls_feats, reg_feats = self.head(pyramid_feats)
-
-        # ---------------- Preds ----------------
-        outputs = self.pred(cls_feats, reg_feats)
-        outputs['image_size'] = [x.shape[2], x.shape[3]]
-
-        if not self.training:
-            all_cls_preds = outputs['pred_cls']
-            all_box_preds = outputs['pred_box']
-
-            # post process
-            bboxes, scores, labels = self.post_process(all_cls_preds, all_box_preds)
-            outputs = {
-                "scores": scores,
-                "labels": labels,
-                "bboxes": bboxes
-            }
-        
-        return outputs 
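
For reference, the multi-label branch of post_process above flattens the per-anchor class map into one [M*C] score vector, keeps the top-k scores, and recovers anchor and class indices by integer division and modulo. A minimal sketch of that indexing trick (shapes and thresholds are illustrative):

    import torch

    num_classes = 3
    cls_pred = torch.tensor([[0.1, 0.9, 0.2],    # anchor 0
                             [0.8, 0.1, 0.1],    # anchor 1
                             [0.2, 0.1, 0.7]])   # anchor 2

    scores = cls_pred.flatten()                            # [M*C] = [9]
    predicted_prob, topk_idxs = scores.sort(descending=True)
    topk_scores = predicted_prob[:4]                       # keep top-4 candidates
    topk_idxs   = topk_idxs[:4]

    keep_idxs = topk_scores > 0.5                          # confidence threshold
    topk_idxs = topk_idxs[keep_idxs]

    anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor')
    labels      = topk_idxs % num_classes
    print(anchor_idxs.tolist(), labels.tolist())           # [0, 1, 2] [1, 0, 2]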

+ 0 - 172
yolo/models/yolox2/yolox2_backbone.py

@@ -1,172 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .yolox2_basic import BasicConv, CSPBlock
-except ImportError:
-    from yolox2_basic import BasicConv, CSPBlock
-
-# IN1K pretrained weight
-pretrained_urls = {
-    'n': None,
-    's': None,
-    'm': None,
-    'l': None,
-    'x': None,
-}
-
-# --------------------- Yolov5's Backbone -----------------------
-## Modified CSPDarkNet
-class Yolov5Backbone(nn.Module):
-    def __init__(self, cfg):
-        super(Yolov5Backbone, self).__init__()
-        # ------------------ Basic setting ------------------
-        self.model_scale = cfg.scale
-        self.feat_dims = [round(64   * cfg.width),
-                          round(128  * cfg.width),
-                          round(256  * cfg.width),
-                          round(512  * cfg.width),
-                          round(1024 * cfg.width)]
-        
-        # ------------------ Network setting ------------------
-        ## P1/2
-        self.layer_1 = BasicConv(3, self.feat_dims[0],
-                                 kernel_size=6, padding=2, stride=2,
-                                 act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise)
-        # P2/4
-        self.layer_2 = nn.Sequential(
-            BasicConv(self.feat_dims[0], self.feat_dims[1],
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            CSPBlock(in_dim     = self.feat_dims[1],
-                     out_dim    = self.feat_dims[1],
-                     num_blocks = round(3*cfg.depth),
-                     expansion  = 0.5,
-                     shortcut   = True,
-                     act_type   = cfg.bk_act,
-                     norm_type  = cfg.bk_norm,
-                     depthwise  = cfg.bk_depthwise)
-        )
-        # P3/8
-        self.layer_3 = nn.Sequential(
-            BasicConv(self.feat_dims[1], self.feat_dims[2],
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            CSPBlock(in_dim     = self.feat_dims[2],
-                     out_dim    = self.feat_dims[2],
-                     num_blocks = round(9*cfg.depth),
-                     expansion  = 0.5,
-                     shortcut   = True,
-                     act_type   = cfg.bk_act,
-                     norm_type  = cfg.bk_norm,
-                     depthwise  = cfg.bk_depthwise)
-        )
-        # P4/16
-        self.layer_4 = nn.Sequential(
-            BasicConv(self.feat_dims[2], self.feat_dims[3],
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            CSPBlock(in_dim     = self.feat_dims[3],
-                     out_dim    = self.feat_dims[3],
-                     num_blocks = round(9*cfg.depth),
-                     expansion  = 0.5,
-                     shortcut   = True,
-                     act_type   = cfg.bk_act,
-                     norm_type  = cfg.bk_norm,
-                     depthwise  = cfg.bk_depthwise)
-        )
-        # P5/32
-        self.layer_5 = nn.Sequential(
-            BasicConv(self.feat_dims[3], self.feat_dims[4],
-                      kernel_size=3, padding=1, stride=2,
-                      act_type=cfg.bk_act, norm_type=cfg.bk_norm, depthwise=cfg.bk_depthwise),
-            CSPBlock(in_dim     = self.feat_dims[4],
-                     out_dim    = self.feat_dims[4],
-                     num_blocks = round(3*cfg.depth),
-                     expansion  = 0.5,
-                     shortcut   = True,
-                     act_type   = cfg.bk_act,
-                     norm_type  = cfg.bk_norm,
-                     depthwise  = cfg.bk_depthwise)
-        )
-
-        # Initialize all layers
-        self.init_weights()
-        
-        # Load imagenet pretrained weight
-        if cfg.use_pretrained:
-            self.load_pretrained()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def load_pretrained(self):
-        url = pretrained_urls[self.model_scale]
-        if url is not None:
-            print('Loading backbone pretrained weight from : {}'.format(url))
-            # checkpoint state dict
-            checkpoint = torch.hub.load_state_dict_from_url(
-                url=url, map_location="cpu", check_hash=True)
-            checkpoint_state_dict = checkpoint.pop("model")
-            # model state dict
-            model_state_dict = self.state_dict()
-            # check
-            for k in list(checkpoint_state_dict.keys()):
-                if k in model_state_dict:
-                    shape_model = tuple(model_state_dict[k].shape)
-                    shape_checkpoint = tuple(checkpoint_state_dict[k].shape)
-                    if shape_model != shape_checkpoint:
-                        checkpoint_state_dict.pop(k)
-                else:
-                    checkpoint_state_dict.pop(k)
-                    print('Unused key: ', k)
-            # load the weight (strict=False: mismatched keys were filtered out above)
-            self.load_state_dict(checkpoint_state_dict, strict=False)
-        else:
-            print('No pretrained weight for model scale: {}.'.format(self.model_scale))
-
-    def forward(self, x):
-        c1 = self.layer_1(x)
-        c2 = self.layer_2(c1)
-        c3 = self.layer_3(c2)
-        c4 = self.layer_4(c3)
-        c5 = self.layer_5(c4)
-        outputs = [c3, c4, c5]
-
-        return outputs
-
-
-if __name__ == '__main__':
-    import time
-    from thop import profile
-    class BaseConfig(object):
-        def __init__(self) -> None:
-            self.bk_act = 'silu'
-            self.bk_norm = 'BN'
-            self.bk_depthwise = False
-            self.width = 0.5
-            self.depth = 0.34
-            self.scale = "s"
-            self.use_pretrained = True
-
-    cfg = BaseConfig()
-    model = Yolov5Backbone(cfg)
-    x = torch.randn(1, 3, 640, 640)
-    t0 = time.time()
-    outputs = model(x)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for out in outputs:
-        print(out.shape)
-
-    x = torch.randn(1, 3, 640, 640)
-    print('==============================')
-    flops, params = profile(model, inputs=(x, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))
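
The backbone's channel widths and block depths come from cfg.width and cfg.depth; for the 's' scale exercised in the __main__ test above (width=0.5, depth=0.34), the resulting dims can be checked directly (pure arithmetic on the constants in the code above):

    width, depth = 0.5, 0.34
    feat_dims  = [round(c * width) for c in (64, 128, 256, 512, 1024)]
    num_blocks = [round(n * depth) for n in (3, 9, 9, 3)]
    print(feat_dims)   # [32, 64, 128, 256, 512]
    print(num_blocks)  # [1, 3, 3, 1]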

+ 0 - 137
yolo/models/yolox2/yolox2_basic.py

@@ -1,137 +0,0 @@
-import torch
-import torch.nn as nn
-from typing import List
-
-
-# --------------------- Basic modules ---------------------
-def get_conv2d(c1, c2, k, p, s, d, g, bias=False):
-    conv = nn.Conv2d(c1, c2, k, stride=s, padding=p, dilation=d, groups=g, bias=bias)
-
-    return conv
-
-def get_activation(act_type=None):
-    if act_type == 'relu':
-        return nn.ReLU(inplace=True)
-    elif act_type == 'lrelu':
-        return nn.LeakyReLU(0.1, inplace=True)
-    elif act_type == 'mish':
-        return nn.Mish(inplace=True)
-    elif act_type == 'silu':
-        return nn.SiLU(inplace=True)
-    elif act_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-def get_norm(norm_type, dim):
-    if norm_type == 'BN':
-        return nn.BatchNorm2d(dim)
-    elif norm_type == 'GN':
-        return nn.GroupNorm(num_groups=32, num_channels=dim)
-    elif norm_type is None:
-        return nn.Identity()
-    else:
-        raise NotImplementedError
-
-class BasicConv(nn.Module):
-    def __init__(self, 
-                 in_dim,                   # in channels
-                 out_dim,                  # out channels 
-                 kernel_size=1,            # kernel size 
-                 padding=0,                # padding
-                 stride=1,                 # stride
-                 dilation=1,               # dilation
-                 act_type  :str = 'lrelu', # activation
-                 norm_type :str = 'BN',    # normalization
-                 depthwise :bool = False
-                ):
-        super(BasicConv, self).__init__()
-        self.depthwise = depthwise
-        if not depthwise:
-            self.conv = get_conv2d(in_dim, out_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=1)
-            self.norm = get_norm(norm_type, out_dim)
-        else:
-            self.conv1 = get_conv2d(in_dim, in_dim, k=kernel_size, p=padding, s=stride, d=dilation, g=in_dim)
-            self.norm1 = get_norm(norm_type, in_dim)
-            self.conv2 = get_conv2d(in_dim, out_dim, k=1, p=0, s=1, d=1, g=1)
-            self.norm2 = get_norm(norm_type, out_dim)
-        self.act  = get_activation(act_type)
-
-    def forward(self, x):
-        if not self.depthwise:
-            return self.act(self.norm(self.conv(x)))
-        else:
-            # Depthwise conv (apply the activation, which was otherwise unused here)
-            x = self.act(self.norm1(self.conv1(x)))
-            # Pointwise conv
-            x = self.act(self.norm2(self.conv2(x)))
-            return x
-
-
-# ---------------------------- Basic Modules ----------------------------
-class YoloBottleneck(nn.Module):
-    def __init__(self,
-                 in_dim       :int,
-                 out_dim      :int,
-                 kernel_size  :List  = [1, 3],
-                 expansion    :float = 0.5,
-                 shortcut     :bool  = False,
-                 act_type     :str   = 'silu',
-                 norm_type    :str   = 'BN',
-                 depthwise    :bool  = False,
-                 ) -> None:
-        super(YoloBottleneck, self).__init__()
-        inter_dim = int(out_dim * expansion)
-        # ----------------- Network setting -----------------
-        self.conv_layer1 = BasicConv(in_dim, inter_dim,
-                                     kernel_size=kernel_size[0], padding=kernel_size[0]//2, stride=1,
-                                     act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        self.conv_layer2 = BasicConv(inter_dim, out_dim,
-                                     kernel_size=kernel_size[1], padding=kernel_size[1]//2, stride=1,
-                                     act_type=act_type, norm_type=norm_type, depthwise=depthwise)
-        self.shortcut = shortcut and in_dim == out_dim
-
-    def forward(self, x):
-        h = self.conv_layer2(self.conv_layer1(x))
-
-        return x + h if self.shortcut else h
-
-class CSPBlock(nn.Module):
-    def __init__(self,
-                 in_dim,
-                 out_dim,
-                 num_blocks   :int   = 1,
-                 expansion    :float = 0.5,
-                 shortcut     :bool  = False,
-                 act_type     :str   = 'silu',
-                 norm_type    :str   = 'BN',
-                 depthwise    :bool  = False,
-                 ):
-        super(CSPBlock, self).__init__()
-        # ---------- Basic parameters ----------
-        self.num_blocks = num_blocks
-        self.expansion = expansion
-        self.shortcut = shortcut
-        inter_dim = round(out_dim * expansion)
-        # ---------- Model parameters ----------
-        self.conv_layer_1 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.conv_layer_2 = BasicConv(in_dim, inter_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.conv_layer_3 = BasicConv(inter_dim * 2, out_dim, kernel_size=1, act_type=act_type, norm_type=norm_type)
-        self.module       = nn.Sequential(*[YoloBottleneck(inter_dim,
-                                                           inter_dim,
-                                                           kernel_size  = [1, 3],
-                                                           expansion    = 1.0,
-                                                           shortcut     = shortcut,
-                                                           act_type     = act_type,
-                                                           norm_type    = norm_type,
-                                                           depthwise    = depthwise)
-                                                           for _ in range(num_blocks)
-                                                           ])
-
-    def forward(self, x):
-        x1 = self.conv_layer_1(x)
-        x2 = self.module(self.conv_layer_2(x))
-        out = self.conv_layer_3(torch.cat([x1, x2], dim=1))
-
-        return out
-    
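
The depthwise flag in BasicConv above swaps one dense k×k convolution for a k×k depthwise convolution followed by a 1×1 pointwise convolution. A standalone parameter-count comparison (dims are illustrative):

    import torch.nn as nn

    c_in, c_out, k = 128, 128, 3
    dense = nn.Conv2d(c_in, c_out, k, padding=k // 2, bias=False)
    separable = nn.Sequential(
        nn.Conv2d(c_in, c_in, k, padding=k // 2, groups=c_in, bias=False),  # depthwise
        nn.Conv2d(c_in, c_out, 1, bias=False),                              # pointwise
    )
    count = lambda m: sum(p.numel() for p in m.parameters())
    print(count(dense))      # 147456 (= 128 * 128 * 3 * 3)
    print(count(separable))  # 17536  (= 128 * 3 * 3 + 128 * 128)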

+ 0 - 172
yolo/models/yolox2/yolox2_head.py

@@ -1,172 +0,0 @@
-import torch
-import torch.nn as nn
-
-try:
-    from .yolox2_basic import BasicConv
-except ImportError:
-    from yolox2.yolox2_basic import BasicConv
-
-
-## Single-level Detection Head
-class DetHead(nn.Module):
-    def __init__(self,
-                 in_dim       :int  = 256,
-                 cls_head_dim :int  = 256,
-                 reg_head_dim :int  = 256,
-                 num_cls_head :int  = 2,
-                 num_reg_head :int  = 2,
-                 act_type     :str  = "silu",
-                 norm_type    :str  = "BN",
-                 depthwise    :bool = False):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.in_dim = in_dim
-        self.num_cls_head = num_cls_head
-        self.num_reg_head = num_reg_head
-        self.act_type = act_type
-        self.norm_type = norm_type
-        self.depthwise = depthwise
-        
-        # --------- Network Parameters ----------
-        ## cls head
-        cls_feats = []
-        self.cls_head_dim = cls_head_dim
-        for i in range(num_cls_head):
-            if i == 0:
-                cls_feats.append(
-                    BasicConv(in_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-            else:
-                cls_feats.append(
-                    BasicConv(self.cls_head_dim, self.cls_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-        ## reg head
-        reg_feats = []
-        self.reg_head_dim = reg_head_dim
-        for i in range(num_reg_head):
-            if i == 0:
-                reg_feats.append(
-                    BasicConv(in_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-            else:
-                reg_feats.append(
-                    BasicConv(self.reg_head_dim, self.reg_head_dim,
-                              kernel_size=3, padding=1, stride=1, 
-                              act_type=act_type,
-                              norm_type=norm_type,
-                              depthwise=depthwise)
-                              )
-        self.cls_feats = nn.Sequential(*cls_feats)
-        self.reg_feats = nn.Sequential(*reg_feats)
-
-        self.init_weights()
-        
-    def init_weights(self):
-        """Initialize the parameters."""
-        for m in self.modules():
-            if isinstance(m, torch.nn.Conv2d):
-                # In order to be consistent with the source code,
-                # reset the Conv2d initialization parameters
-                m.reset_parameters()
-
-    def forward(self, x):
-        """
-            x: (Tensor) [B, C, H, W]
-        """
-        cls_feats = self.cls_feats(x)
-        reg_feats = self.reg_feats(x)
-
-        return cls_feats, reg_feats
-    
-## Multi-level Detection Head
-class Yolov5DetHead(nn.Module):
-    def __init__(self, cfg, in_dims):
-        super().__init__()
-        ## ----------- Network Parameters -----------
-        self.multi_level_heads = nn.ModuleList(
-            [DetHead(in_dim       = in_dims[level],
-                     cls_head_dim = round(cfg.head_dim * cfg.width),
-                     reg_head_dim = round(cfg.head_dim * cfg.width),
-                     num_cls_head = cfg.num_cls_head,
-                     num_reg_head = cfg.num_reg_head,
-                     act_type     = cfg.head_act,
-                     norm_type    = cfg.head_norm,
-                     depthwise    = cfg.head_depthwise)
-                     for level in range(cfg.num_levels)
-                     ])
-        # --------- Basic Parameters ----------
-        self.in_dims = in_dims
-        self.cls_head_dim = cfg.head_dim
-        self.reg_head_dim = cfg.head_dim
-
-
-    def forward(self, feats):
-        """
-            feats: List[(Tensor)] [[B, C, H, W], ...]
-        """
-        cls_feats = []
-        reg_feats = []
-        for feat, head in zip(feats, self.multi_level_heads):
-            # ---------------- Pred ----------------
-            cls_feat, reg_feat = head(feat)
-
-            cls_feats.append(cls_feat)
-            reg_feats.append(reg_feat)
-
-        return cls_feats, reg_feats
-
-
-if __name__=='__main__':
-    import time
-    from thop import profile
-    # Model config
-    
-    # YOLOX-Base config
-    class YoloxBaseConfig(object):
-        def __init__(self) -> None:
-            # ---------------- Model config ----------------
-            self.out_stride = 32
-            self.max_stride = 32
-            self.num_levels = 3
-            self.width = 1.0  # channel-width multiplier, required by Yolov5DetHead
-            ## Head
-            self.head_act  = 'lrelu'
-            self.head_norm = 'BN'
-            self.head_depthwise = False
-            self.head_dim  = 256
-            self.num_cls_head   = 2
-            self.num_reg_head   = 2
-
-    cfg = YoloxBaseConfig()
-    # Build a head
-    pyramid_feats = [torch.randn(1, cfg.head_dim, 80, 80),
-                     torch.randn(1, cfg.head_dim, 40, 40),
-                     torch.randn(1, cfg.head_dim, 20, 20)]
-    head = Yolov5DetHead(cfg, [cfg.head_dim]*3)
-
-
-    # Inference
-    t0 = time.time()
-    cls_feats, reg_feats = head(pyramid_feats)
-    t1 = time.time()
-    print('Time: ', t1 - t0)
-    for cls_f, reg_f in zip(cls_feats, reg_feats):
-        print(cls_f.shape, reg_f.shape)
-
-    print('==============================')
-    flops, params = profile(head, inputs=(pyramid_feats, ), verbose=False)
-    print('==============================')
-    print('GFLOPs : {:.2f}'.format(flops / 1e9 * 2))
-    print('Params : {:.2f} M'.format(params / 1e6))    
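
The decoupled head above runs independent 3×3 conv stacks for classification and regression on the same input feature, preserving its spatial size. A minimal stand-in for one level (plain Conv-BN-SiLU blocks in place of BasicConv; dims are illustrative):

    import torch
    import torch.nn as nn

    dim, num_convs = 256, 2   # matches num_cls_head = num_reg_head = 2 above
    def make_branch(n):
        layers = []
        for _ in range(n):
            layers += [nn.Conv2d(dim, dim, 3, padding=1, bias=False),
                       nn.BatchNorm2d(dim), nn.SiLU(inplace=True)]
        return nn.Sequential(*layers)

    cls_branch, reg_branch = make_branch(num_convs), make_branch(num_convs)
    x = torch.randn(1, dim, 80, 80)
    print(cls_branch(x).shape, reg_branch(x).shape)  # both [1, 256, 80, 80]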

+ 0 - 33
yolo/models/yolox2/yolox2_neck.py

@@ -1,33 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .yolox2_basic import BasicConv
-
-
-# Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
-class SPPF(nn.Module):
-    """
-        This code referenced to https://github.com/ultralytics/yolov5
-    """
-    def __init__(self, cfg, in_dim, out_dim):
-        super().__init__()
-        ## ----------- Basic Parameters -----------
-        inter_dim = round(in_dim * cfg.neck_expand_ratio)
-        self.out_dim = out_dim
-        ## ----------- Network Parameters -----------
-        self.cv1 = BasicConv(in_dim, inter_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.cv2 = BasicConv(inter_dim * 4, out_dim,
-                             kernel_size=1, padding=0, stride=1,
-                             act_type=cfg.neck_act, norm_type=cfg.neck_norm)
-        self.m = nn.MaxPool2d(kernel_size=cfg.spp_pooling_size,
-                              stride=1,
-                              padding=cfg.spp_pooling_size // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        y1 = self.m(x)
-        y2 = self.m(y1)
-
-        return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
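
SPPF applies one 5×5 max pool three times in series; the cascaded outputs match the parallel 5/9/13 pools of the original SPP exactly, at lower cost. A quick standalone check of that equivalence:

    import torch
    import torch.nn as nn

    x = torch.randn(1, 8, 32, 32)
    m5 = nn.MaxPool2d(5, stride=1, padding=2)
    y1 = m5(x); y2 = m5(y1); y3 = m5(y2)

    m9  = nn.MaxPool2d(9,  stride=1, padding=4)
    m13 = nn.MaxPool2d(13, stride=1, padding=6)
    print(torch.equal(y2, m9(x)))   # True: two 5x5 pools == one 9x9 pool
    print(torch.equal(y3, m13(x)))  # True: three 5x5 pools == one 13x13 pool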

+ 0 - 108
yolo/models/yolox2/yolox2_pafpn.py

@@ -1,108 +0,0 @@
-from typing import List
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .yolox2_basic import BasicConv, CSPBlock
-
-
-# Yolov5 PaFPN
-class Yolov5PaFPN(nn.Module):
-    def __init__(self, cfg, in_dims: List = [256, 512, 1024],
-                 ):
-        super(Yolov5PaFPN, self).__init__()
-        self.in_dims = in_dims
-        c3, c4, c5 = in_dims
-
-        # ---------------------- Yolox's Top down FPN ----------------------
-        ## P5 -> P4
-        self.reduce_layer_1   = BasicConv(c5, round(512*cfg.width),
-                                          kernel_size=1, padding=0, stride=1,
-                                          act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.top_down_layer_1 = CSPBlock(in_dim     = c4 + round(512*cfg.width),
-                                         out_dim    = round(512*cfg.width),
-                                         num_blocks = round(3*cfg.depth),
-                                         expansion  = 0.5,
-                                         shortcut   = False,
-                                         act_type   = cfg.fpn_act,
-                                         norm_type  = cfg.fpn_norm,
-                                         depthwise  = cfg.fpn_depthwise)
-
-        ## P4 -> P3
-        self.reduce_layer_2   = BasicConv(round(512*cfg.width), round(256*cfg.width),
-                                          kernel_size=1, padding=0, stride=1,
-                                          act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-        self.top_down_layer_2 = CSPBlock(in_dim     = c3 + round(256*cfg.width),
-                                         out_dim    = round(256*cfg.width),
-                                         num_blocks = round(3*cfg.depth),
-                                         expansion  = 0.5,
-                                         shortcut   = False,
-                                         act_type   = cfg.fpn_act,
-                                         norm_type  = cfg.fpn_norm,
-                                         depthwise  = cfg.fpn_depthwise)
-        
-        # ---------------------- Yolox's Bottom up PAN ----------------------
-        ## P3 -> P4
-        self.downsample_layer_1 = BasicConv(round(256*cfg.width), round(256*cfg.width),
-                                            kernel_size=3, padding=1, stride=2,
-                                            act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-        self.bottom_up_layer_1  = CSPBlock(in_dim     = round(256*cfg.width) + round(256*cfg.width),
-                                           out_dim    = round(512*cfg.width),
-                                           num_blocks = round(3*cfg.depth),
-                                           expansion  = 0.5,
-                                           shortcut   = False,
-                                           act_type   = cfg.fpn_act,
-                                           norm_type  = cfg.fpn_norm,
-                                           depthwise  = cfg.fpn_depthwise)
-        ## P4 -> P5
-        self.downsample_layer_2 = BasicConv(round(512*cfg.width), round(512*cfg.width),
-                                            kernel_size=3, padding=1, stride=2,
-                                            act_type=cfg.fpn_act, norm_type=cfg.fpn_norm, depthwise=cfg.fpn_depthwise)
-        self.bottom_up_layer_2  = CSPBlock(in_dim     = round(512*cfg.width) + round(512*cfg.width),
-                                           out_dim    = round(1024*cfg.width),
-                                           num_blocks = round(3*cfg.depth),
-                                           expansion  = 0.5,
-                                           shortcut   = False,
-                                           act_type   = cfg.fpn_act,
-                                           norm_type  = cfg.fpn_norm,
-                                           depthwise  = cfg.fpn_depthwise)
-
-        # ---------------------- Yolox's output projection ----------------------
-        self.out_layers = nn.ModuleList([
-            BasicConv(in_dim, round(cfg.head_dim*cfg.width), kernel_size=1,
-                      act_type=cfg.fpn_act, norm_type=cfg.fpn_norm)
-                      for in_dim in [round(256*cfg.width), round(512*cfg.width), round(1024*cfg.width)]
-                      ])
-        self.out_dims = [round(cfg.head_dim*cfg.width)] * 3
-
-    def forward(self, features):
-        c3, c4, c5 = features
-        
-        # ------------------ Top down FPN ------------------
-        ## P5 -> P4
-        p5 = self.reduce_layer_1(c5)
-        p5_up = F.interpolate(p5, scale_factor=2.0)
-        p4 = self.top_down_layer_1(torch.cat([c4, p5_up], dim=1))
-
-        ## P4 -> P3
-        p4 = self.reduce_layer_2(p4)
-        p4_up = F.interpolate(p4, scale_factor=2.0)
-        p3 = self.top_down_layer_2(torch.cat([c3, p4_up], dim=1))
-
-        # ------------------ Bottom up PAN ------------------
-        ## P3 -> P4
-        p3_ds = self.downsample_layer_1(p3)
-        p4 = self.bottom_up_layer_1(torch.cat([p4, p3_ds], dim=1))
-
-        ## P4 -> P5
-        p4_ds = self.downsample_layer_2(p4)
-        p5 = self.bottom_up_layer_2(torch.cat([p5, p4_ds], dim=1))
-
-        out_feats = [p3, p4, p5]
-
-        # output proj layers
-        out_feats_proj = []
-        for feat, layer in zip(out_feats, self.out_layers):
-            out_feats_proj.append(layer(feat))
-            
-        return out_feats_proj
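
In the top-down path above, each coarser map is upsampled 2× and concatenated channel-wise with the next finer backbone feature; the bottom-up path mirrors this with stride-2 convs. A minimal shape check of the upsample-and-concat step (dims follow the 'l' scale, width=1.0):

    import torch
    import torch.nn.functional as F

    c4 = torch.randn(1, 512, 40, 40)          # backbone C4 (stride 16)
    p5 = torch.randn(1, 512, 20, 20)          # reduced P5 (stride 32)
    p5_up = F.interpolate(p5, scale_factor=2.0)
    p4_in = torch.cat([c4, p5_up], dim=1)
    print(p5_up.shape)  # torch.Size([1, 512, 40, 40])
    print(p4_in.shape)  # torch.Size([1, 1024, 40, 40])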

+ 0 - 128
yolo/models/yolox2/yolox2_pred.py

@@ -1,128 +0,0 @@
-import math
-import torch
-import torch.nn as nn
-
-# -------------------- Detection Pred Layer --------------------
-## Single-level pred layer
-class AFDetPredLayer(nn.Module):
-    def __init__(self,
-                 cls_dim      :int,
-                 reg_dim      :int,
-                 stride       :int,
-                 num_classes  :int,
-                 ):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.stride  = stride
-        self.cls_dim = cls_dim
-        self.reg_dim = reg_dim
-        self.num_classes = num_classes
-
-        # --------- Network Parameters ----------
-        self.cls_pred = nn.Conv2d(self.cls_dim, num_classes, kernel_size=1)
-        self.reg_pred = nn.Conv2d(self.reg_dim, 4, kernel_size=1)                
-
-        self.init_bias()
-        
-    def init_bias(self):
-        # cls pred bias
-        b = self.cls_pred.bias.view(1, -1)
-        b.data.fill_(math.log(5 / self.num_classes / (640. / self.stride) ** 2))
-        self.cls_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-        # reg pred
-        b = self.reg_pred.bias.view(-1, )
-        b.data.fill_(1.0)
-        self.reg_pred.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-    def generate_anchors(self, fmp_size):
-        """
-            fmp_size: (List) [H, W]
-        """
-        fmp_h, fmp_w = fmp_size
-        anchor_y, anchor_x = torch.meshgrid(torch.arange(fmp_h), torch.arange(fmp_w), indexing='ij')
-
-        # [H, W, 2] -> [HW, 2]
-        anchors = torch.stack([anchor_x, anchor_y], dim=-1).float().view(-1, 2)
-        anchors = anchors + 0.5
-        anchors = anchors * self.stride
-
-        return anchors
-        
-    def forward(self, cls_feat, reg_feat):
-        # Prediction layers
-        cls_pred = self.cls_pred(cls_feat)
-        reg_pred = self.reg_pred(reg_feat)
-
-        # Generate grid (anchor) coordinates
-        B, _, H, W = cls_pred.size()
-        fmp_size = [H, W]
-        anchors = self.generate_anchors(fmp_size)
-        anchors = anchors.to(cls_pred.device)
-
-        # Reshape predictions for easier downstream processing:
-        # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]
-        cls_pred = cls_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, self.num_classes)
-        reg_pred = reg_pred.permute(0, 2, 3, 1).contiguous().view(B, -1, 4)
-        
-        # Decode bounding-box coordinates
-        cxcy_pred = reg_pred[..., :2] * self.stride + anchors
-        bwbh_pred = torch.exp(reg_pred[..., 2:]) * self.stride
-        pred_x1y1 = cxcy_pred - bwbh_pred * 0.5
-        pred_x2y2 = cxcy_pred + bwbh_pred * 0.5
-        box_pred = torch.cat([pred_x1y1, pred_x2y2], dim=-1)
-
-        # output dict
-        outputs = {"pred_cls": cls_pred,       # (torch.Tensor) [B, M, C]
-                   "pred_reg": reg_pred,       # (torch.Tensor) [B, M, 4]
-                   "pred_box": box_pred,       # (torch.Tensor) [B, M, 4]
-                   "anchors" : anchors,        # (torch.Tensor) [M, 2]
-                   "fmp_size": fmp_size,
-                   "stride"  : self.stride,    # (Int)
-                   }
-
-        return outputs
-
-## Multi-level pred layer
-class Yolov5AFDetPredLayer(nn.Module):
-    def __init__(self, cfg):
-        super().__init__()
-        # --------- Basic Parameters ----------
-        self.cfg = cfg
-
-        # ----------- Network Parameters -----------
-        ## pred layers
-        self.multi_level_preds = nn.ModuleList(
-            [AFDetPredLayer(cls_dim      = round(cfg.head_dim * cfg.width),
-                            reg_dim      = round(cfg.head_dim * cfg.width),
-                            stride       = cfg.out_stride[level],
-                            num_classes  = cfg.num_classes,)
-                            for level in range(cfg.num_levels)
-                            ])
-
-    def forward(self, cls_feats, reg_feats):
-        all_anchors = []
-        all_fmp_sizes = []
-        all_cls_preds = []
-        all_reg_preds = []
-        all_box_preds = []
-        for level in range(self.cfg.num_levels):
-            # -------------- Single-level prediction --------------
-            outputs = self.multi_level_preds[level](cls_feats[level], reg_feats[level])
-
-            # collect results
-            all_cls_preds.append(outputs["pred_cls"])
-            all_reg_preds.append(outputs["pred_reg"])
-            all_box_preds.append(outputs["pred_box"])
-            all_fmp_sizes.append(outputs["fmp_size"])
-            all_anchors.append(outputs["anchors"])
-        
-        # output dict
-        outputs = {"pred_cls":  all_cls_preds,         # List(Tensor) [B, M, C]
-                   "pred_reg":  all_reg_preds,         # List(Tensor) [B, M, 4*(reg_max)]
-                   "pred_box":  all_box_preds,         # List(Tensor) [B, M, 4]
-                   "fmp_sizes": all_fmp_sizes,         # List(Tensor) [M, 1]
-                   "anchors":   all_anchors,           # List(Tensor) [M, 2]
-                   "strides":   self.cfg.out_stride,   # List(Int) = [8, 16, 32]
-                   }
-
-        return outputs
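
The pred layer above decodes boxes by treating the first two regression channels as a cell-relative center offset (scaled by stride) and the last two as log-scale width/height. A minimal numeric sketch of the anchor grid and the decode on a 2×2 map at stride 8 (values are illustrative):

    import math
    import torch

    stride = 8
    fmp_h, fmp_w = 2, 2
    ys, xs = torch.meshgrid(torch.arange(fmp_h), torch.arange(fmp_w), indexing='ij')
    anchors = (torch.stack([xs, ys], dim=-1).float().view(-1, 2) + 0.5) * stride
    print(anchors.tolist())  # [[4.0, 4.0], [12.0, 4.0], [4.0, 12.0], [12.0, 12.0]]

    reg = torch.tensor([[0.5, 0.5, math.log(2.0), math.log(2.0)]])  # one prediction
    cxcy = reg[..., :2] * stride + anchors[:1]   # center = offset * stride + anchor
    bwbh = torch.exp(reg[..., 2:]) * stride      # size   = exp(pred) * stride
    box  = torch.cat([cxcy - bwbh / 2, cxcy + bwbh / 2], dim=-1)
    print(box.tolist())  # [[0.0, 0.0, 16.0, 16.0]]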