yjh0410 1 year ago
parent
commit
71ea3a5477

+ 1 - 1
yolo/config/yolov1_config.py

@@ -64,7 +64,7 @@ class Yolov1BaseConfig(object):
 
         # ---------------- Lr Scheduler config ----------------
         self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
+        self.lr_scheduler = "linear"
         self.max_epoch    = 150
         self.eval_epoch   = 10
         self.no_aug_epoch = 20
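
Note: this change (repeated across the configs below) swaps the default decay from "cosine" to "linear". For reference, the two decay factors this commit defines in yolo/utils/solver/lr_scheduler.py (see the last hunk) can be compared in isolation. A minimal sketch, with min_lr_ratio = 0.01 and max_epoch = 150 taken from Yolov1BaseConfig:

import math

min_lr_ratio, max_epoch = 0.01, 150   # values from Yolov1BaseConfig

def linear_lf(x):
    # new default: multiplier falls linearly from 1.0 to min_lr_ratio
    return (1 - x / max_epoch) * (1.0 - min_lr_ratio) + min_lr_ratio

def cosine_lf(x):
    # old default: half-cosine from 1.0 down to min_lr_ratio
    return ((1 - math.cos(x * math.pi / max_epoch)) / 2) * (min_lr_ratio - 1) + 1

for e in (0, 75, 149):
    print(f"epoch {e:3d}  linear {linear_lf(e):.3f}  cosine {cosine_lf(e):.3f}")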

+ 1 - 1
yolo/config/yolov2_config.py

@@ -67,7 +67,7 @@ class Yolov2BaseConfig(object):
 
         # ---------------- Lr Scheduler config ----------------
         self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
+        self.lr_scheduler = "linear"
         self.max_epoch    = 150
         self.eval_epoch   = 10
         self.no_aug_epoch = 20

+ 2 - 2
yolo/config/yolov3_config.py

@@ -81,13 +81,13 @@ class Yolov3BaseConfig(object):
         self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
         self.momentum     = 0.9
         self.weight_decay = 0.05
-        self.clip_max_norm   = 35.
+        self.clip_max_norm   = 35.0
         self.warmup_bias_lr  = 0.1
         self.warmup_momentum = 0.8
 
         # ---------------- Lr Scheduler config ----------------
         self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
+        self.lr_scheduler = "linear"
         self.max_epoch    = 300
         self.eval_epoch   = 10
         self.no_aug_epoch = 20

+ 2 - 2
yolo/config/yolov5_af_config.py

@@ -79,13 +79,13 @@ class Yolov5AFBaseConfig(object):
         self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
         self.momentum     = 0.9
         self.weight_decay = 0.05
-        self.clip_max_norm   = 10.0
+        self.clip_max_norm   = 35.0
         self.warmup_bias_lr  = 0.1
         self.warmup_momentum = 0.8
 
         # ---------------- Lr Scheduler config ----------------
         self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
+        self.lr_scheduler = "linear"
         self.max_epoch    = 300
         self.eval_epoch   = 10
         self.no_aug_epoch = 20

+ 2 - 2
yolo/config/yolov5_config.py

@@ -81,13 +81,13 @@ class Yolov5BaseConfig(object):
         self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
         self.momentum     = 0.9
         self.weight_decay = 0.05
-        self.clip_max_norm   = 10.0
+        self.clip_max_norm   = 35.0
         self.warmup_bias_lr  = 0.1
         self.warmup_momentum = 0.8
 
         # ---------------- Lr Scheduler config ----------------
         self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
+        self.lr_scheduler = "linear"
         self.max_epoch    = 300
         self.eval_epoch   = 10
         self.no_aug_epoch = 20

+ 2 - 2
yolo/config/yolov7_af_config.py

@@ -75,13 +75,13 @@ class Yolov7AFBaseConfig(object):
         self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
         self.momentum     = 0.9
         self.weight_decay = 0.05
-        self.clip_max_norm   = 10.0
+        self.clip_max_norm   = 35.0
         self.warmup_bias_lr  = 0.1
         self.warmup_momentum = 0.8
 
         # ---------------- Lr Scheduler config ----------------
         self.warmup_epoch = 3
-        self.lr_scheduler = "cosine"
+        self.lr_scheduler = "linear"
         self.max_epoch    = 300
         self.eval_epoch   = 10
         self.no_aug_epoch = 20

+ 1 - 1
yolo/config/yolov8_config.py

@@ -81,7 +81,7 @@ class Yolov8BaseConfig(object):
         self.min_lr_ratio = 0.01      # min_lr  = base_lr * min_lr_ratio
         self.momentum     = 0.9
         self.weight_decay = 0.05
-        self.clip_max_norm   = 10.0
+        self.clip_max_norm   = 35.0
         self.warmup_bias_lr  = 0.1
         self.warmup_momentum = 0.8
 

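The configs now share clip_max_norm = 35.0. The diff does not show the call site, so the following is only an illustrative sketch of how such a setting is typically applied with PyTorch's built-in clipper (the model and loss are dummies):

import torch

clip_max_norm = 35.0                      # value from the configs above

model = torch.nn.Linear(8, 2)             # dummy model for illustration
loss = model(torch.randn(4, 8)).sum()
loss.backward()

# Rescales all gradients in place so their global L2 norm is at most 35.0;
# returns the norm measured before clipping.
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=clip_max_norm)
print(f"grad norm before clipping: {total_norm:.3f}")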
+ 15 - 12
yolo/engine.py

@@ -2,6 +2,7 @@ import torch
 import torch.distributed as dist
 
 import os
+import numpy as np
 import random
 
 # ----------------- Extra Components -----------------
@@ -11,7 +12,7 @@ from utils.vis_tools import vis_data
 
 # ----------------- Optimizer & LrScheduler Components -----------------
 from utils.solver.optimizer import build_yolo_optimizer, build_rtdetr_optimizer
-from utils.solver.lr_scheduler import LinearWarmUpLrScheduler, build_lr_scheduler
+from utils.solver.lr_scheduler import LinearWarmUpLrScheduler, build_lr_scheduler, build_lambda_lr_scheduler
 
 
 class YoloTrainer(object):
@@ -67,9 +68,10 @@ class YoloTrainer(object):
         self.optimizer, self.start_epoch = build_yolo_optimizer(cfg, model, args.resume)
 
         # ---------------------------- Build LR Scheduler ----------------------------
-        warmup_iters = cfg.warmup_epoch * len(self.train_loader)
-        self.lr_scheduler_warmup = LinearWarmUpLrScheduler(warmup_iters, cfg.base_lr, cfg.warmup_bias_lr, cfg.warmup_momentum)
-        self.lr_scheduler = build_lr_scheduler(cfg, self.optimizer, args.resume)
+        self.lr_scheduler, self.lf = build_lambda_lr_scheduler(cfg, self.optimizer, cfg.max_epoch)
+        self.lr_scheduler.last_epoch = self.start_epoch - 1  # do not move
+        if self.args.resume and self.args.resume != 'None':
+            self.lr_scheduler.step()
 
     def train(self, model):
         for epoch in range(self.start_epoch, self.cfg.max_epoch):
@@ -95,8 +97,7 @@ class YoloTrainer(object):
             self.train_one_epoch(model)
 
             # LR Schedule
-            if (epoch + 1) > self.cfg.warmup_epoch:
-                self.lr_scheduler.step()
+            self.lr_scheduler.step()
 
             # eval one epoch
             if self.heavy_eval:
@@ -145,7 +146,6 @@ class YoloTrainer(object):
                     'model': model_eval.state_dict(),
                     'mAP': round(cur_map*100, 1),
                     'optimizer':  self.optimizer.state_dict(),
-                    'lr_scheduler': self.lr_scheduler.state_dict(),
                     'epoch': self.epoch,
                     'args': self.args,
                     }
@@ -177,11 +177,14 @@ class YoloTrainer(object):
         for iter_i, (images, targets) in enumerate(metric_logger.log_every(self.train_loader, print_freq, header)):
             ni = iter_i + self.epoch * epoch_size
             # Warmup
-            if nw > 0 and ni < nw:
-                self.lr_scheduler_warmup(ni, self.optimizer)
-            elif ni == nw:
-                print("Warmup stage is over.")
-                self.lr_scheduler_warmup.set_lr(self.optimizer, self.cfg.base_lr)
+            if ni <= nw:
+                xi = [0, nw]  # x interp
+                for j, x in enumerate(self.optimizer.param_groups):
+                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
+                    x['lr'] = np.interp(
+                        ni, xi, [self.cfg.warmup_bias_lr if j == 0 else 0.0, x['initial_lr'] * self.lf(self.epoch)])
+                    if 'momentum' in x:
+                        x['momentum'] = np.interp(ni, xi, [self.cfg.warmup_momentum, self.cfg.momentum])
                                 
             # To device
             images = images.to(self.device, non_blocking=True).float()

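The warmup block above replaces LinearWarmUpLrScheduler with a YOLOv5-style np.interp ramp. A self-contained sketch of that logic, with illustrative values for nw, lr0, and the group layout (the real code reads them from cfg and self.optimizer.param_groups, and the lr endpoint there is initial_lr * lf(epoch) rather than a fixed lr0):

import numpy as np

nw, lr0 = 300, 0.01                       # illustrative warmup iters and base lr
warmup_bias_lr, warmup_momentum, momentum = 0.1, 0.8, 0.9

for ni in (0, 150, 300):
    xi = [0, nw]
    # group 0 (biases) falls from warmup_bias_lr to lr0; other groups rise from 0.0
    bias_lr  = np.interp(ni, xi, [warmup_bias_lr, lr0])
    other_lr = np.interp(ni, xi, [0.0, lr0])
    mom      = np.interp(ni, xi, [warmup_momentum, momentum])
    print(f"iter {ni:3d}  bias_lr {bias_lr:.4f}  other_lr {other_lr:.4f}  momentum {mom:.3f}")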
+ 20 - 2
yolo/utils/solver/lr_scheduler.py

@@ -1,4 +1,5 @@
 import numpy as np
+import math
 import torch
 
 
@@ -30,8 +31,7 @@ def build_lr_scheduler(cfg, optimizer, resume=None):
     print('LR Scheduler: {}'.format(cfg.lr_scheduler))
 
     if cfg.lr_scheduler == "step":
-        lr_step = [cfg.max_epoch // 3, cfg.max_epoch // 3 * 2]
-        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_step, gamma=0.1)
+        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.lr_step, gamma=0.1)
     elif cfg.lr_scheduler == "cosine":
         lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=cfg.max_epoch - cfg.warmup_epoch - 1, eta_min=cfg.min_lr)
     else:
@@ -46,3 +46,21 @@ def build_lr_scheduler(cfg, optimizer, resume=None):
             lr_scheduler.load_state_dict(checkpoint_state_dict)
 
     return lr_scheduler
+
+def build_lambda_lr_scheduler(cfg, optimizer, epochs):
+    """Build learning rate scheduler from cfg file."""
+    print('==============================')
+    print('Lr Scheduler: {}'.format(cfg.lr_scheduler))
+    # Cosine LR scheduler
+    if cfg.lr_scheduler == 'cosine':
+        lf = lambda x: ((1 - math.cos(x * math.pi / epochs)) / 2) * (cfg.min_lr_ratio - 1) + 1
+    # Linear LR scheduler
+    elif cfg.lr_scheduler == 'linear':
+        lf = lambda x: (1 - x / epochs) * (1.0 - cfg.min_lr_ratio) + cfg.min_lr_ratio
+
+    else:
+        print('unknown lr scheduler.')
+        exit(0)
+    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
+
+    return scheduler, lf
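
Usage sketch for the new builder: LambdaLR multiplies each group's initial_lr by lf(last_epoch) on every step() call, which is why engine.py sets last_epoch = start_epoch - 1 when resuming. Values below are illustrative only (base lr 0.01, min_lr_ratio 0.01, 150 epochs), with the linear lf inlined:

import torch

epochs, min_lr_ratio = 150, 0.01
lf = lambda x: (1 - x / epochs) * (1.0 - min_lr_ratio) + min_lr_ratio

model = torch.nn.Linear(8, 2)             # dummy model for illustration
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

for epoch in range(3):
    optimizer.step()                      # one dummy optimization step per "epoch"
    scheduler.step()                      # lr becomes 0.01 * lf(epoch + 1)
    print(epoch, optimizer.param_groups[0]['lr'])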