yjh0410 1 year ago
parent commit 081b7eaec2
3 changed files with 2 additions and 11 deletions
  1. yolo/engine.py (+1 -1)
  2. yolo/utils/misc.py (+0 -8)
  3. yolo/utils/solver/lr_scheduler.py (+1 -2)

+ 1 - 1
yolo/engine.py

@@ -69,7 +69,7 @@ class YoloTrainer(object):
 
         # ---------------------------- Build LR Scheduler ----------------------------
         warmup_iters = cfg.warmup_epoch * len(self.train_loader)
-        self.lr_scheduler_warmup = LinearWarmUpLrScheduler(warmup_iters, cfg.base_lr, cfg.warmup_bias_lr, cfg.warmup_momentum)
+        self.lr_scheduler_warmup = LinearWarmUpLrScheduler(warmup_iters, cfg.base_lr, cfg.warmup_bias_lr)
         self.lr_scheduler = build_lr_scheduler(cfg, self.optimizer, args.resume)
 
     def train(self, model):
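
For context, a warmup scheduler like this is typically stepped once per iteration until `warmup_iters` is reached, after which the main scheduler takes over. A minimal sketch of that loop, assuming a hypothetical `__call__(iteration, optimizer)` interface on the warmup scheduler (this commit only shows its construction):

# Hypothetical training-loop excerpt (not part of this commit): ramp the
# LR for the first warmup_iters iterations, then defer to the epoch-level
# scheduler built by build_lr_scheduler.
for epoch in range(cfg.max_epoch):
    for i, (images, targets) in enumerate(self.train_loader):
        ni = i + epoch * len(self.train_loader)  # global iteration index
        if ni < warmup_iters:
            self.lr_scheduler_warmup(ni, self.optimizer)
        # ... forward pass, loss, backward, optimizer.step() ...
    self.lr_scheduler.step()  # per-epoch LR decay once warmup is done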

+ 0 - 8
yolo/utils/misc.py

@@ -373,14 +373,6 @@ def load_weight(model, path_to_ckpt, fuse_cbn=False, fuse_rep_conv=False):
 
     return model
 
-def get_total_grad_norm(parameters, norm_type=2):
-    parameters = list(filter(lambda p: p.grad is not None, parameters))
-    norm_type = float(norm_type)
-    device = parameters[0].grad.device
-    total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]),
-                            norm_type)
-    return total_norm
-
 ## Model EMA
 class ModelEMA(object):
     def __init__(self, model, ema_decay=0.9999, ema_tau=2000, resume=None):
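
The deleted `get_total_grad_norm` helper duplicates a value that `torch.nn.utils.clip_grad_norm_` already computes and returns. A minimal stand-in, assuming gradients have already been populated by `backward()`:

import torch

# clip_grad_norm_ returns the total (2-)norm of all parameter gradients;
# passing an infinite max_norm makes it a pure measurement with no clipping.
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=float("inf"))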

+ 1 - 2
yolo/utils/solver/lr_scheduler.py

@@ -5,9 +5,8 @@ import torch
 # ------------------------- WarmUp LR Scheduler -------------------------
 ## Warmup LR Scheduler
 class LinearWarmUpLrScheduler(object):
-    def __init__(self, wp_iter=500, base_lr=0.01, warmup_bias_lr=0.1, warmup_momentum=0.8):
+    def __init__(self, wp_iter=500, base_lr=0.01, warmup_bias_lr=0.0):
         self.wp_iter = wp_iter
-        self.warmup_momentum = warmup_momentum
         self.base_lr = base_lr
         self.warmup_bias_lr = warmup_bias_lr
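
The diff only touches `__init__`; for completeness, here is a sketch of how the remaining class might apply the linear ramp each step. The `__call__` interface and the `"is_bias"` param-group key are assumptions for illustration, not code from this repository:

class LinearWarmUpLrScheduler(object):
    def __init__(self, wp_iter=500, base_lr=0.01, warmup_bias_lr=0.0):
        self.wp_iter = wp_iter
        self.base_lr = base_lr
        self.warmup_bias_lr = warmup_bias_lr

    def __call__(self, iteration, optimizer):
        # Linearly interpolate each param group's LR toward base_lr over
        # the first wp_iter iterations. Bias groups (marked here with a
        # hypothetical "is_bias" key) start from warmup_bias_lr instead of 0.
        ratio = min(iteration / self.wp_iter, 1.0)
        for group in optimizer.param_groups:
            start_lr = self.warmup_bias_lr if group.get("is_bias", False) else 0.0
            group["lr"] = start_lr + ratio * (self.base_lr - start_lr)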