@@ -16,17 +16,16 @@ from utils.vis_tools import vis_data
|
|
|
from evaluator.build import build_evluator
|
|
|
|
|
|
# ----------------- Optimizer & LrScheduler Components -----------------
|
|
|
-from utils.solver.optimizer import build_yolo_optimizer, build_rtdetr_optimizer
|
|
|
+from utils.solver.optimizer import build_optimizer
|
|
|
from utils.solver.lr_scheduler import build_lambda_lr_scheduler
|
|
|
-from utils.solver.lr_scheduler import build_wp_lr_scheduler, build_lr_scheduler
|
|
|
|
|
|
# ----------------- Dataset Components -----------------
|
|
|
from dataset.build import build_dataset, build_transform
|
|
|
|
|
|
|
|
|
# ----------------------- Det trainers -----------------------
|
|
|
-## YOLOv8 Trainer
|
|
|
-class Yolov8Trainer(object):
|
|
|
+## YOLOX Trainer
|
|
|
+class YoloxTrainer(object):
|
|
|
def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
|
|
|
# ------------------- basic parameters -------------------
|
|
|
self.args = args
|
|
|
@@ -35,9 +34,9 @@ class Yolov8Trainer(object):
|
|
|
self.device = device
|
|
|
self.criterion = criterion
|
|
|
self.world_size = world_size
|
|
|
+ self.grad_accumulate = args.grad_accumulate
|
|
|
+ self.no_aug_epoch = args.no_aug_epoch
|
|
|
self.heavy_eval = False
|
|
|
- self.last_opt_step = 0
|
|
|
- self.clip_grad = 10
|
|
|
# weak augmentation stage
|
|
|
self.second_stage = False
|
|
|
self.third_stage = False
|
|
|
@@ -47,10 +46,10 @@ class Yolov8Trainer(object):
|
|
|
self.path_to_save = os.path.join(args.save_folder, args.dataset, args.model)
|
|
|
os.makedirs(self.path_to_save, exist_ok=True)
|
|
|
|
|
|
- # ---------------------------- Hyperparameters refer to YOLOv8 ----------------------------
|
|
|
- self.optimizer_dict = {'optimizer': 'sgd', 'momentum': 0.937, 'weight_decay': 5e-4, 'lr0': 0.01}
|
|
|
+ # ---------------------------- Hyperparameters refer to YOLOX ----------------------------
|
|
|
+ self.optimizer_dict = {'optimizer': 'sgd', 'momentum': 0.9, 'weight_decay': 5e-4, 'lr0': 0.01}
|
|
|
self.ema_dict = {'ema_decay': 0.9999, 'ema_tau': 2000}
|
|
|
- self.lr_schedule_dict = {'scheduler': 'linear', 'lrf': 0.01}
|
|
|
+ self.lr_schedule_dict = {'scheduler': 'cosine', 'lrf': 0.05}
|
|
|
self.warmup_dict = {'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1}
|
|
|
|
|
|
# ---------------------------- Build Dataset & Model & Trans. Config ----------------------------
|
|
|
@@ -60,9 +59,9 @@ class Yolov8Trainer(object):
|
|
|
|
|
|
# ---------------------------- Build Transform ----------------------------
|
|
|
self.train_transform, self.trans_cfg = build_transform(
|
|
|
- args=args, trans_config=self.trans_cfg, max_stride=model_cfg['max_stride'], is_train=True)
|
|
|
+ args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
self.val_transform, _ = build_transform(
|
|
|
- args=args, trans_config=self.trans_cfg, max_stride=model_cfg['max_stride'], is_train=False)
|
|
|
+ args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=False)
|
|
|
|
|
|
# ---------------------------- Build Dataset & Dataloader ----------------------------
|
|
|
self.dataset, self.dataset_info = build_dataset(self.args, self.data_cfg, self.trans_cfg, self.train_transform, is_train=True)
|
|
|
@@ -75,13 +74,11 @@ class Yolov8Trainer(object):
|
|
|
self.scaler = torch.cuda.amp.GradScaler(enabled=self.args.fp16)
|
|
|
|
|
|
# ---------------------------- Build Optimizer ----------------------------
|
|
|
- accumulate = max(1, round(64 / self.args.batch_size))
|
|
|
- print('Grad Accumulate: {}'.format(accumulate))
|
|
|
- self.optimizer_dict['weight_decay'] *= self.args.batch_size * accumulate / 64
|
|
|
- self.optimizer, self.start_epoch = build_yolo_optimizer(self.optimizer_dict, model, self.args.resume)
|
|
|
+ self.optimizer_dict['lr0'] *= self.args.batch_size * self.grad_accumulate / 64
|
|
|
+ self.optimizer, self.start_epoch = build_optimizer(self.optimizer_dict, model, self.args.resume)
|
|
|
|
|
|
# ---------------------------- Build LR Scheduler ----------------------------
|
|
|
- self.lr_scheduler, self.lf = build_lambda_lr_scheduler(self.lr_schedule_dict, self.optimizer, self.args.max_epoch)
|
|
|
+ self.lr_scheduler, self.lf = build_lambda_lr_scheduler(self.lr_schedule_dict, self.optimizer, self.args.max_epoch - self.no_aug_epoch)
|
|
|
self.lr_scheduler.last_epoch = self.start_epoch - 1 # do not move
|
|
|
if self.args.resume and self.args.resume != 'None':
|
|
|
self.lr_scheduler.step()
|
|
|
@@ -93,6 +90,7 @@ class Yolov8Trainer(object):
|
|
|
else:
|
|
|
self.model_ema = None
|
|
|
|
|
|
+
|
|
|
def train(self, model):
|
|
|
for epoch in range(self.start_epoch, self.args.max_epoch):
|
|
|
if self.args.distributed:
|
|
|
@@ -125,7 +123,7 @@ class Yolov8Trainer(object):
|
|
|
'epoch': self.epoch,
|
|
|
'args': self.args},
|
|
|
checkpoint_path)
|
|
|
-
|
|
|
+
|
|
|
# train one epoch
|
|
|
self.epoch = epoch
|
|
|
self.train_one_epoch(model)
|
|
|
@@ -200,15 +198,13 @@ class Yolov8Trainer(object):
|
|
|
img_size = self.args.img_size
|
|
|
t0 = time.time()
|
|
|
nw = epoch_size * self.args.wp_epoch
|
|
|
- accumulate = accumulate = max(1, round(64 / self.args.batch_size))
|
|
|
|
|
|
- # train one epoch
|
|
|
+ # Train one epoch
|
|
|
for iter_i, (images, targets) in enumerate(self.train_loader):
|
|
|
ni = iter_i + self.epoch * epoch_size
|
|
|
# Warmup
|
|
|
if ni <= nw:
|
|
|
xi = [0, nw] # x interp
|
|
|
- accumulate = max(1, np.interp(ni, xi, [1, 64 / self.args.batch_size]).round())
|
|
|
for j, x in enumerate(self.optimizer.param_groups):
|
|
|
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
|
|
|
x['lr'] = np.interp(
|
|
|
@@ -216,54 +212,45 @@ class Yolov8Trainer(object):
|
|
|
if 'momentum' in x:
|
|
|
x['momentum'] = np.interp(ni, xi, [self.warmup_dict['warmup_momentum'], self.optimizer_dict['momentum']])
|
|
|
|
|
|
- # to device
|
|
|
+ # To device
|
|
|
images = images.to(self.device, non_blocking=True).float()
|
|
|
|
|
|
# Multi scale
|
|
|
- if self.args.multi_scale:
|
|
|
+ if self.args.multi_scale and ni % 10 == 0:
|
|
|
images, targets, img_size = self.rescale_image_targets(
|
|
|
images, targets, self.model_cfg['stride'], self.args.min_box_size, self.model_cfg['multi_scale'])
|
|
|
else:
|
|
|
targets = self.refine_targets(targets, self.args.min_box_size)
|
|
|
|
|
|
- # visualize train targets
|
|
|
+ # Visualize train targets
|
|
|
if self.args.vis_tgt:
|
|
|
vis_data(images*255, targets)
|
|
|
|
|
|
- # inference
|
|
|
+ # Inference
|
|
|
with torch.cuda.amp.autocast(enabled=self.args.fp16):
|
|
|
outputs = model(images)
|
|
|
- # loss
|
|
|
+ # Compute loss
|
|
|
loss_dict = self.criterion(outputs=outputs, targets=targets, epoch=self.epoch)
|
|
|
losses = loss_dict['losses']
|
|
|
- losses *= images.shape[0] # loss * bs
|
|
|
+ # Grad Accumulate
|
|
|
+ if self.grad_accumulate > 1:
|
|
|
+ losses /= self.grad_accumulate
|
|
|
|
|
|
- # reduce
|
|
|
loss_dict_reduced = distributed_utils.reduce_dict(loss_dict)
|
|
|
|
|
|
- # gradient averaged between devices in DDP mode
|
|
|
- losses *= distributed_utils.get_world_size()
|
|
|
-
|
|
|
- # backward
|
|
|
+ # Backward
|
|
|
self.scaler.scale(losses).backward()
|
|
|
|
|
|
# Optimize
|
|
|
- if ni - self.last_opt_step >= accumulate:
|
|
|
- if self.clip_grad > 0:
|
|
|
- # unscale gradients
|
|
|
- self.scaler.unscale_(self.optimizer)
|
|
|
- # clip gradients
|
|
|
- torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=self.clip_grad)
|
|
|
- # optimizer.step
|
|
|
+ if ni % self.grad_accumulate == 0:
|
|
|
self.scaler.step(self.optimizer)
|
|
|
self.scaler.update()
|
|
|
self.optimizer.zero_grad()
|
|
|
# ema
|
|
|
if self.model_ema is not None:
|
|
|
self.model_ema.update(model)
|
|
|
- self.last_opt_step = ni
|
|
|
|
|
|
- # display
|
|
|
+ # Logs
|
|
|
if distributed_utils.is_main_process() and iter_i % 10 == 0:
|
|
|
t1 = time.time()
|
|
|
cur_lr = [param_group['lr'] for param_group in self.optimizer.param_groups]
|
|
|
@@ -273,7 +260,10 @@ class Yolov8Trainer(object):
|
|
|
log += '[lr: {:.6f}]'.format(cur_lr[2])
|
|
|
# loss info
|
|
|
for k in loss_dict_reduced.keys():
|
|
|
- log += '[{}: {:.2f}]'.format(k, loss_dict_reduced[k])
|
|
|
+ loss_val = loss_dict_reduced[k]
|
|
|
+ if k == 'losses':
|
|
|
+ loss_val *= self.grad_accumulate
|
|
|
+ log += '[{}: {:.2f}]'.format(k, loss_val)
|
|
|
|
|
|
# other info
|
|
|
log += '[time: {:.2f}]'.format(t1 - t0)
|
|
|
@@ -283,12 +273,14 @@ class Yolov8Trainer(object):
|
|
|
print(log, flush=True)
|
|
|
|
|
|
t0 = time.time()
|
|
|
-
|
|
|
+
|
|
|
if self.args.debug:
|
|
|
print("For debug mode, we only train 1 iteration")
|
|
|
break
|
|
|
|
|
|
- self.lr_scheduler.step()
|
|
|
+ # LR Schedule
|
|
|
+ if not self.second_stage:
|
|
|
+ self.lr_scheduler.step()
|
|
|
|
|
|
def check_second_stage(self):
|
|
|
# set second stage
|
|
|
@@ -369,8 +361,13 @@ class Yolov8Trainer(object):
|
|
|
|
|
|
# During training phase, the shape of input image is square.
|
|
|
old_img_size = images.shape[-1]
|
|
|
- new_img_size = random.randrange(old_img_size * multi_scale_range[0], old_img_size * multi_scale_range[1] + max_stride)
|
|
|
+ min_img_size = old_img_size * multi_scale_range[0]
|
|
|
+ max_img_size = old_img_size * multi_scale_range[1]
|
|
|
+
|
|
|
+ # Choose a new image size
|
|
|
+ new_img_size = random.randrange(min_img_size, max_img_size + max_stride, max_stride)
|
|
|
new_img_size = new_img_size // max_stride * max_stride # size
|
|
|
+
|
|
|
if new_img_size / old_img_size != 1:
|
|
|
# interpolate
|
|
|
images = torch.nn.functional.interpolate(
|
|
|
@@ -396,8 +393,8 @@ class Yolov8Trainer(object):
|
|
|
|
|
|
return images, targets, new_img_size
|
|
|
|
|
|
-## YOLOX Trainer
|
|
|
-class YoloxTrainer(object):
|
|
|
+## Real-time Convolutional Object Detector Trainer
|
|
|
+class RTCTrainer(object):
|
|
|
def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
|
|
|
# ------------------- basic parameters -------------------
|
|
|
self.args = args
|
|
|
@@ -407,7 +404,7 @@ class YoloxTrainer(object):
|
|
|
self.criterion = criterion
|
|
|
self.world_size = world_size
|
|
|
self.grad_accumulate = args.grad_accumulate
|
|
|
- self.no_aug_epoch = args.no_aug_epoch
|
|
|
+ self.clip_grad = 35
|
|
|
self.heavy_eval = False
|
|
|
# weak augmentation stage
|
|
|
self.second_stage = False
|
|
|
@@ -418,39 +415,39 @@ class YoloxTrainer(object):
|
|
|
self.path_to_save = os.path.join(args.save_folder, args.dataset, args.model)
|
|
|
os.makedirs(self.path_to_save, exist_ok=True)
|
|
|
|
|
|
- # ---------------------------- Hyperparameters refer to YOLOX ----------------------------
|
|
|
- self.optimizer_dict = {'optimizer': 'sgd', 'momentum': 0.9, 'weight_decay': 5e-4, 'lr0': 0.01}
|
|
|
- self.ema_dict = {'ema_decay': 0.9999, 'ema_tau': 2000}
|
|
|
- self.lr_schedule_dict = {'scheduler': 'cosine', 'lrf': 0.05}
|
|
|
+ # ---------------------------- Hyperparameters refer to RTMDet ----------------------------
|
|
|
+ self.optimizer_dict = {'optimizer': 'adamw', 'momentum': None, 'weight_decay': 5e-2, 'lr0': 0.001}
|
|
|
+ self.ema_dict = {'ema_decay': 0.9998, 'ema_tau': 2000}
|
|
|
+ self.lr_schedule_dict = {'scheduler': 'linear', 'lrf': 0.01}
|
|
|
self.warmup_dict = {'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1}
|
|
|
|
|
|
# ---------------------------- Build Dataset & Model & Trans. Config ----------------------------
|
|
|
- self.data_cfg = data_cfg
|
|
|
+ self.data_cfg = data_cfg
|
|
|
self.model_cfg = model_cfg
|
|
|
self.trans_cfg = trans_cfg
|
|
|
|
|
|
# ---------------------------- Build Transform ----------------------------
|
|
|
self.train_transform, self.trans_cfg = build_transform(
|
|
|
- args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
+ args=args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
self.val_transform, _ = build_transform(
|
|
|
- args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=False)
|
|
|
+ args=args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=False)
|
|
|
|
|
|
# ---------------------------- Build Dataset & Dataloader ----------------------------
|
|
|
- self.dataset, self.dataset_info = build_dataset(self.args, self.data_cfg, self.trans_cfg, self.train_transform, is_train=True)
|
|
|
- self.train_loader = build_dataloader(self.args, self.dataset, self.args.batch_size // self.world_size, CollateFunc())
|
|
|
+ self.dataset, self.dataset_info = build_dataset(args, self.data_cfg, self.trans_cfg, self.train_transform, is_train=True)
|
|
|
+ self.train_loader = build_dataloader(args, self.dataset, self.args.batch_size // self.world_size, CollateFunc())
|
|
|
|
|
|
# ---------------------------- Build Evaluator ----------------------------
|
|
|
- self.evaluator = build_evluator(self.args, self.data_cfg, self.val_transform, self.device)
|
|
|
+ self.evaluator = build_evluator(args, self.data_cfg, self.val_transform, self.device)
|
|
|
|
|
|
# ---------------------------- Build Grad. Scaler ----------------------------
|
|
|
self.scaler = torch.cuda.amp.GradScaler(enabled=self.args.fp16)
|
|
|
|
|
|
# ---------------------------- Build Optimizer ----------------------------
|
|
|
- self.optimizer_dict['lr0'] *= self.args.batch_size * self.grad_accumulate / 64
|
|
|
- self.optimizer, self.start_epoch = build_yolo_optimizer(self.optimizer_dict, model, self.args.resume)
|
|
|
+ self.optimizer_dict['lr0'] *= args.batch_size * self.grad_accumulate / 64
|
|
|
+ self.optimizer, self.start_epoch = build_optimizer(self.optimizer_dict, model, args.resume)
|
|
|
|
|
|
# ---------------------------- Build LR Scheduler ----------------------------
|
|
|
- self.lr_scheduler, self.lf = build_lambda_lr_scheduler(self.lr_schedule_dict, self.optimizer, self.args.max_epoch - self.no_aug_epoch)
|
|
|
+ self.lr_scheduler, self.lf = build_lambda_lr_scheduler(self.lr_schedule_dict, self.optimizer, args.max_epoch)
|
|
|
self.lr_scheduler.last_epoch = self.start_epoch - 1 # do not move
|
|
|
if self.args.resume and self.args.resume != 'None':
|
|
|
self.lr_scheduler.step()
|
|
|
@@ -462,7 +459,6 @@ class YoloxTrainer(object):
|
|
|
else:
|
|
|
self.model_ema = None
|
|
|
|
|
|
-
|
|
|
def train(self, model):
|
|
|
for epoch in range(self.start_epoch, self.args.max_epoch):
|
|
|
if self.args.distributed:
|
|
|
@@ -495,7 +491,7 @@ class YoloxTrainer(object):
|
|
|
'epoch': self.epoch,
|
|
|
'args': self.args},
|
|
|
checkpoint_path)
|
|
|
-
|
|
|
+
|
|
|
# train one epoch
|
|
|
self.epoch = epoch
|
|
|
self.train_one_epoch(model)
|
|
|
@@ -565,14 +561,21 @@ class YoloxTrainer(object):
|
|
|
dist.barrier()
|
|
|
|
|
|
def train_one_epoch(self, model):
|
|
|
+ metric_logger = MetricLogger(delimiter=" ")
|
|
|
+ metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
|
|
|
+ metric_logger.add_meter('size', SmoothedValue(window_size=1, fmt='{value:d}'))
|
|
|
+ metric_logger.add_meter('grad_norm', SmoothedValue(window_size=1, fmt='{value:.1f}'))
|
|
|
+ header = 'Epoch: [{} / {}]'.format(self.epoch, self.args.max_epoch)
|
|
|
+ epoch_size = len(self.train_loader)
|
|
|
+ print_freq = 10
|
|
|
+
|
|
|
# basic parameters
|
|
|
epoch_size = len(self.train_loader)
|
|
|
img_size = self.args.img_size
|
|
|
- t0 = time.time()
|
|
|
nw = epoch_size * self.args.wp_epoch
|
|
|
|
|
|
# Train one epoch
|
|
|
- for iter_i, (images, targets) in enumerate(self.train_loader):
|
|
|
+ for iter_i, (images, targets) in enumerate(metric_logger.log_every(self.train_loader, print_freq, header)):
|
|
|
ni = iter_i + self.epoch * epoch_size
|
|
|
# Warmup
|
|
|
if ni <= nw:
|
|
|
@@ -588,7 +591,7 @@ class YoloxTrainer(object):
|
|
|
images = images.to(self.device, non_blocking=True).float()
|
|
|
|
|
|
# Multi scale
|
|
|
- if self.args.multi_scale and ni % 10 == 0:
|
|
|
+ if self.args.multi_scale:
|
|
|
images, targets, img_size = self.rescale_image_targets(
|
|
|
images, targets, self.model_cfg['stride'], self.args.min_box_size, self.model_cfg['multi_scale'])
|
|
|
else:
|
|
|
@@ -604,8 +607,8 @@ class YoloxTrainer(object):
|
|
|
# Compute loss
|
|
|
loss_dict = self.criterion(outputs=outputs, targets=targets, epoch=self.epoch)
|
|
|
losses = loss_dict['losses']
|
|
|
- # Grad Accu
|
|
|
- if self.grad_accumulate > 1:
|
|
|
+ # Grad Accumulate
|
|
|
+ if self.grad_accumulate > 1:
|
|
|
losses /= self.grad_accumulate
|
|
|
|
|
|
loss_dict_reduced = distributed_utils.reduce_dict(loss_dict)
|
|
|
@@ -615,6 +618,13 @@ class YoloxTrainer(object):
|
|
|
|
|
|
# Optimize
|
|
|
if ni % self.grad_accumulate == 0:
|
|
|
+ grad_norm = None
|
|
|
+ if self.clip_grad > 0:
|
|
|
+ # unscale gradients
|
|
|
+ self.scaler.unscale_(self.optimizer)
|
|
|
+ # clip gradients
|
|
|
+ grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=self.clip_grad)
|
|
|
+ # optimizer.step
|
|
|
self.scaler.step(self.optimizer)
|
|
|
self.scaler.update()
|
|
|
self.optimizer.zero_grad()
|
|
|
@@ -622,29 +632,11 @@ class YoloxTrainer(object):
|
|
|
if self.model_ema is not None:
|
|
|
self.model_ema.update(model)
|
|
|
|
|
|
- # Logs
|
|
|
- if distributed_utils.is_main_process() and iter_i % 10 == 0:
|
|
|
- t1 = time.time()
|
|
|
- cur_lr = [param_group['lr'] for param_group in self.optimizer.param_groups]
|
|
|
- # basic infor
|
|
|
- log = '[Epoch: {}/{}]'.format(self.epoch, self.args.max_epoch)
|
|
|
- log += '[Iter: {}/{}]'.format(iter_i, epoch_size)
|
|
|
- log += '[lr: {:.6f}]'.format(cur_lr[2])
|
|
|
- # loss infor
|
|
|
- for k in loss_dict_reduced.keys():
|
|
|
- loss_val = loss_dict_reduced[k]
|
|
|
- if k == 'losses':
|
|
|
- loss_val *= self.grad_accumulate
|
|
|
- log += '[{}: {:.2f}]'.format(k, loss_val)
|
|
|
-
|
|
|
- # other infor
|
|
|
- log += '[time: {:.2f}]'.format(t1 - t0)
|
|
|
- log += '[size: {}]'.format(img_size)
|
|
|
-
|
|
|
- # print log infor
|
|
|
- print(log, flush=True)
|
|
|
-
|
|
|
- t0 = time.time()
|
|
|
+ # Update log
|
|
|
+ metric_logger.update(**loss_dict_reduced)
|
|
|
+ metric_logger.update(lr=self.optimizer.param_groups[2]["lr"])
|
|
|
+ metric_logger.update(grad_norm=grad_norm)
|
|
|
+ metric_logger.update(size=img_size)
|
|
|
|
|
|
if self.args.debug:
|
|
|
print("For debug mode, we only train 1 iteration")
|
|
|
@@ -653,60 +645,11 @@ class YoloxTrainer(object):
|
|
|
# LR Schedule
|
|
|
if not self.second_stage:
|
|
|
self.lr_scheduler.step()
|
|
|
-
|
|
|
- def check_second_stage(self):
|
|
|
- # set second stage
|
|
|
- print('============== Second stage of Training ==============')
|
|
|
- self.second_stage = True
|
|
|
-
|
|
|
- # close mosaic augmentation
|
|
|
- if self.train_loader.dataset.mosaic_prob > 0.:
|
|
|
- print(' - Close < Mosaic Augmentation > ...')
|
|
|
- self.train_loader.dataset.mosaic_prob = 0.
|
|
|
- self.heavy_eval = True
|
|
|
-
|
|
|
- # close mixup augmentation
|
|
|
- if self.train_loader.dataset.mixup_prob > 0.:
|
|
|
- print(' - Close < Mixup Augmentation > ...')
|
|
|
- self.train_loader.dataset.mixup_prob = 0.
|
|
|
- self.heavy_eval = True
|
|
|
-
|
|
|
- # close rotation augmentation
|
|
|
- if 'degrees' in self.trans_cfg.keys() and self.trans_cfg['degrees'] > 0.0:
|
|
|
- print(' - Close < degress of rotation > ...')
|
|
|
- self.trans_cfg['degrees'] = 0.0
|
|
|
- if 'shear' in self.trans_cfg.keys() and self.trans_cfg['shear'] > 0.0:
|
|
|
- print(' - Close < shear of rotation >...')
|
|
|
- self.trans_cfg['shear'] = 0.0
|
|
|
- if 'perspective' in self.trans_cfg.keys() and self.trans_cfg['perspective'] > 0.0:
|
|
|
- print(' - Close < perspective of rotation > ...')
|
|
|
- self.trans_cfg['perspective'] = 0.0
|
|
|
-
|
|
|
- # build a new transform for second stage
|
|
|
- print(' - Rebuild transforms ...')
|
|
|
- self.train_transform, self.trans_cfg = build_transform(
|
|
|
- args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
- self.train_loader.dataset.transform = self.train_transform
|
|
|
-
|
|
|
- def check_third_stage(self):
|
|
|
- # set third stage
|
|
|
- print('============== Third stage of Training ==============')
|
|
|
- self.third_stage = True
|
|
|
|
|
|
- # close random affine
|
|
|
- if 'translate' in self.trans_cfg.keys() and self.trans_cfg['translate'] > 0.0:
|
|
|
- print(' - Close < translate of affine > ...')
|
|
|
- self.trans_cfg['translate'] = 0.0
|
|
|
- if 'scale' in self.trans_cfg.keys():
|
|
|
- print(' - Close < scale of affine >...')
|
|
|
- self.trans_cfg['scale'] = [1.0, 1.0]
|
|
|
+ # Gather the stats from all processes
|
|
|
+ metric_logger.synchronize_between_processes()
|
|
|
+ print("Averaged stats:", metric_logger)
|
|
|
|
|
|
- # build a new transform for second stage
|
|
|
- print(' - Rebuild transforms ...')
|
|
|
- self.train_transform, self.trans_cfg = build_transform(
|
|
|
- args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
- self.train_loader.dataset.transform = self.train_transform
|
|
|
-
|
|
|
def refine_targets(self, targets, min_box_size):
|
|
|
# rescale targets
|
|
|
for tgt in targets:
|
|
|
@@ -733,8 +676,13 @@ class YoloxTrainer(object):
|
|
|
|
|
|
# During training phase, the shape of input image is square.
|
|
|
old_img_size = images.shape[-1]
|
|
|
- new_img_size = random.randrange(old_img_size * multi_scale_range[0], old_img_size * multi_scale_range[1] + max_stride)
|
|
|
+ min_img_size = old_img_size * multi_scale_range[0]
|
|
|
+ max_img_size = old_img_size * multi_scale_range[1]
|
|
|
+
|
|
|
+ # Choose a new image size
|
|
|
+ new_img_size = random.randrange(min_img_size, max_img_size + max_stride, max_stride)
|
|
|
new_img_size = new_img_size // max_stride * max_stride # size
|
|
|
+
|
|
|
if new_img_size / old_img_size != 1:
|
|
|
# interpolate
|
|
|
images = torch.nn.functional.interpolate(
|
|
|
@@ -760,746 +708,67 @@ class YoloxTrainer(object):
|
|
|
|
|
|
return images, targets, new_img_size
|
|
|
|
|
|
-## Real-time Convolutional Object Detector Trainer
|
|
|
-class RTCTrainer(object):
|
|
|
- def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
|
|
|
- # ------------------- basic parameters -------------------
|
|
|
- self.args = args
|
|
|
- self.epoch = 0
|
|
|
- self.best_map = -1.
|
|
|
- self.device = device
|
|
|
- self.criterion = criterion
|
|
|
- self.world_size = world_size
|
|
|
- self.grad_accumulate = args.grad_accumulate
|
|
|
- self.clip_grad = 35
|
|
|
- self.heavy_eval = False
|
|
|
- # weak augmentatino stage
|
|
|
- self.second_stage = False
|
|
|
- self.third_stage = False
|
|
|
- self.second_stage_epoch = args.no_aug_epoch
|
|
|
- self.third_stage_epoch = args.no_aug_epoch // 2
|
|
|
- # path to save model
|
|
|
- self.path_to_save = os.path.join(args.save_folder, args.dataset, args.model)
|
|
|
- os.makedirs(self.path_to_save, exist_ok=True)
|
|
|
+ def check_second_stage(self):
|
|
|
+ # set second stage
|
|
|
+ print('============== Second stage of Training ==============')
|
|
|
+ self.second_stage = True
|
|
|
|
|
|
- # ---------------------------- Hyperparameters refer to RTMDet ----------------------------
|
|
|
- self.optimizer_dict = {'optimizer': 'adamw', 'momentum': None, 'weight_decay': 5e-2, 'lr0': 0.001}
|
|
|
- self.ema_dict = {'ema_decay': 0.9998, 'ema_tau': 2000}
|
|
|
- self.lr_schedule_dict = {'scheduler': 'linear', 'lrf': 0.01}
|
|
|
- self.warmup_dict = {'warmup_momentum': 0.8, 'warmup_bias_lr': 0.1}
|
|
|
+ # close mosaic augmentation
|
|
|
+ if self.train_loader.dataset.mosaic_prob > 0.:
|
|
|
+ print(' - Close < Mosaic Augmentation > ...')
|
|
|
+ self.train_loader.dataset.mosaic_prob = 0.
|
|
|
+ self.heavy_eval = True
|
|
|
|
|
|
- # ---------------------------- Build Dataset & Model & Trans. Config ----------------------------
|
|
|
- self.data_cfg = data_cfg
|
|
|
- self.model_cfg = model_cfg
|
|
|
- self.trans_cfg = trans_cfg
|
|
|
+ # close mixup augmentation
|
|
|
+ if self.train_loader.dataset.mixup_prob > 0.:
|
|
|
+ print(' - Close < Mixup Augmentation > ...')
|
|
|
+ self.train_loader.dataset.mixup_prob = 0.
|
|
|
+ self.heavy_eval = True
|
|
|
|
|
|
- # ---------------------------- Build Transform ----------------------------
|
|
|
+ # close rotation augmentation
|
|
|
+ if 'degrees' in self.trans_cfg.keys() and self.trans_cfg['degrees'] > 0.0:
|
|
|
+ print(' - Close < degrees of rotation > ...')
|
|
|
+ self.trans_cfg['degrees'] = 0.0
|
|
|
+ if 'shear' in self.trans_cfg.keys() and self.trans_cfg['shear'] > 0.0:
|
|
|
+ print(' - Close < shear of rotation >...')
|
|
|
+ self.trans_cfg['shear'] = 0.0
|
|
|
+ if 'perspective' in self.trans_cfg.keys() and self.trans_cfg['perspective'] > 0.0:
|
|
|
+ print(' - Close < perspective of rotation > ...')
|
|
|
+ self.trans_cfg['perspective'] = 0.0
|
|
|
+
|
|
|
+ # build a new transform for second stage
|
|
|
+ print(' - Rebuild transforms ...')
|
|
|
self.train_transform, self.trans_cfg = build_transform(
|
|
|
- args=args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
- self.val_transform, _ = build_transform(
|
|
|
- args=args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=False)
|
|
|
+ args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
+ self.train_loader.dataset.transform = self.train_transform
|
|
|
+
|
|
|
+ def check_third_stage(self):
|
|
|
+ # set third stage
|
|
|
+ print('============== Third stage of Training ==============')
|
|
|
+ self.third_stage = True
|
|
|
|
|
|
- # ---------------------------- Build Dataset & Dataloader ----------------------------
|
|
|
- self.dataset, self.dataset_info = build_dataset(args, self.data_cfg, self.trans_cfg, self.train_transform, is_train=True)
|
|
|
- self.train_loader = build_dataloader(args, self.dataset, self.args.batch_size // self.world_size, CollateFunc())
|
|
|
-
|
|
|
- # ---------------------------- Build Evaluator ----------------------------
|
|
|
- self.evaluator = build_evluator(args, self.data_cfg, self.val_transform, self.device)
|
|
|
-
|
|
|
- # ---------------------------- Build Grad. Scaler ----------------------------
|
|
|
- self.scaler = torch.cuda.amp.GradScaler(enabled=self.args.fp16)
|
|
|
-
|
|
|
- # ---------------------------- Build Optimizer ----------------------------
|
|
|
- self.optimizer_dict['lr0'] *= args.batch_size * self.grad_accumulate / 64
|
|
|
- self.optimizer, self.start_epoch = build_yolo_optimizer(self.optimizer_dict, model, args.resume)
|
|
|
-
|
|
|
- # ---------------------------- Build LR Scheduler ----------------------------
|
|
|
- self.lr_scheduler, self.lf = build_lambda_lr_scheduler(self.lr_schedule_dict, self.optimizer, args.max_epoch)
|
|
|
- self.lr_scheduler.last_epoch = self.start_epoch - 1 # do not move
|
|
|
- if self.args.resume and self.args.resume != 'None':
|
|
|
- self.lr_scheduler.step()
|
|
|
-
|
|
|
- # ---------------------------- Build Model-EMA ----------------------------
|
|
|
- if self.args.ema and distributed_utils.get_rank() in [-1, 0]:
|
|
|
- print('Build ModelEMA ...')
|
|
|
- self.model_ema = ModelEMA(self.ema_dict, model, self.start_epoch * len(self.train_loader))
|
|
|
- else:
|
|
|
- self.model_ema = None
|
|
|
-
|
|
|
- def train(self, model):
|
|
|
- for epoch in range(self.start_epoch, self.args.max_epoch):
|
|
|
- if self.args.distributed:
|
|
|
- self.train_loader.batch_sampler.sampler.set_epoch(epoch)
|
|
|
-
|
|
|
- # check second stage
|
|
|
- if epoch >= (self.args.max_epoch - self.second_stage_epoch - 1) and not self.second_stage:
|
|
|
- self.check_second_stage()
|
|
|
- # save model of the last mosaic epoch
|
|
|
- weight_name = '{}_last_mosaic_epoch.pth'.format(self.args.model)
|
|
|
- checkpoint_path = os.path.join(self.path_to_save, weight_name)
|
|
|
- print('Saving state of the last Mosaic epoch-{}.'.format(self.epoch))
|
|
|
- torch.save({'model': model.state_dict(),
|
|
|
- 'mAP': round(self.evaluator.map*100, 1),
|
|
|
- 'optimizer': self.optimizer.state_dict(),
|
|
|
- 'epoch': self.epoch,
|
|
|
- 'args': self.args},
|
|
|
- checkpoint_path)
|
|
|
-
|
|
|
- # check third stage
|
|
|
- if epoch >= (self.args.max_epoch - self.third_stage_epoch - 1) and not self.third_stage:
|
|
|
- self.check_third_stage()
|
|
|
- # save model of the last mosaic epoch
|
|
|
- weight_name = '{}_last_weak_augment_epoch.pth'.format(self.args.model)
|
|
|
- checkpoint_path = os.path.join(self.path_to_save, weight_name)
|
|
|
- print('Saving state of the last weak augment epoch-{}.'.format(self.epoch))
|
|
|
- torch.save({'model': model.state_dict(),
|
|
|
- 'mAP': round(self.evaluator.map*100, 1),
|
|
|
- 'optimizer': self.optimizer.state_dict(),
|
|
|
- 'epoch': self.epoch,
|
|
|
- 'args': self.args},
|
|
|
- checkpoint_path)
|
|
|
-
|
|
|
- # train one epoch
|
|
|
- self.epoch = epoch
|
|
|
- self.train_one_epoch(model)
|
|
|
-
|
|
|
- # eval one epoch
|
|
|
- if self.heavy_eval:
|
|
|
- model_eval = model.module if self.args.distributed else model
|
|
|
- self.eval(model_eval)
|
|
|
- else:
|
|
|
- model_eval = model.module if self.args.distributed else model
|
|
|
- if (epoch % self.args.eval_epoch) == 0 or (epoch == self.args.max_epoch - 1):
|
|
|
- self.eval(model_eval)
|
|
|
-
|
|
|
- if self.args.debug:
|
|
|
- print("For debug mode, we only train 1 epoch")
|
|
|
- break
|
|
|
-
|
|
|
- def eval(self, model):
|
|
|
- # chech model
|
|
|
- model_eval = model if self.model_ema is None else self.model_ema.ema
|
|
|
-
|
|
|
- if distributed_utils.is_main_process():
|
|
|
- # check evaluator
|
|
|
- if self.evaluator is None:
|
|
|
- print('No evaluator ... save model and go on training.')
|
|
|
- print('Saving state, epoch: {}'.format(self.epoch))
|
|
|
- weight_name = '{}_no_eval.pth'.format(self.args.model)
|
|
|
- checkpoint_path = os.path.join(self.path_to_save, weight_name)
|
|
|
- torch.save({'model': model_eval.state_dict(),
|
|
|
- 'mAP': -1.,
|
|
|
- 'optimizer': self.optimizer.state_dict(),
|
|
|
- 'epoch': self.epoch,
|
|
|
- 'args': self.args},
|
|
|
- checkpoint_path)
|
|
|
- else:
|
|
|
- print('eval ...')
|
|
|
- # set eval mode
|
|
|
- model_eval.trainable = False
|
|
|
- model_eval.eval()
|
|
|
-
|
|
|
- # evaluate
|
|
|
- with torch.no_grad():
|
|
|
- self.evaluator.evaluate(model_eval)
|
|
|
-
|
|
|
- # save model
|
|
|
- cur_map = self.evaluator.map
|
|
|
- if cur_map > self.best_map:
|
|
|
- # update best-map
|
|
|
- self.best_map = cur_map
|
|
|
- # save model
|
|
|
- print('Saving state, epoch:', self.epoch)
|
|
|
- weight_name = '{}_best.pth'.format(self.args.model)
|
|
|
- checkpoint_path = os.path.join(self.path_to_save, weight_name)
|
|
|
- torch.save({'model': model_eval.state_dict(),
|
|
|
- 'mAP': round(self.best_map*100, 1),
|
|
|
- 'optimizer': self.optimizer.state_dict(),
|
|
|
- 'epoch': self.epoch,
|
|
|
- 'args': self.args},
|
|
|
- checkpoint_path)
|
|
|
-
|
|
|
- # set train mode.
|
|
|
- model_eval.trainable = True
|
|
|
- model_eval.train()
|
|
|
-
|
|
|
- if self.args.distributed:
|
|
|
- # wait for all processes to synchronize
|
|
|
- dist.barrier()
|
|
|
-
|
|
|
- def train_one_epoch(self, model):
|
|
|
- metric_logger = MetricLogger(delimiter=" ")
|
|
|
- metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
|
|
|
- metric_logger.add_meter('size', SmoothedValue(window_size=1, fmt='{value:d}'))
|
|
|
- metric_logger.add_meter('grad_norm', SmoothedValue(window_size=1, fmt='{value:.1f}'))
|
|
|
- header = 'Epoch: [{} / {}]'.format(self.epoch, self.args.max_epoch)
|
|
|
- epoch_size = len(self.train_loader)
|
|
|
- print_freq = 10
|
|
|
-
|
|
|
- # basic parameters
|
|
|
- epoch_size = len(self.train_loader)
|
|
|
- img_size = self.args.img_size
|
|
|
- nw = epoch_size * self.args.wp_epoch
|
|
|
-
|
|
|
- # Train one epoch
|
|
|
- for iter_i, (images, targets) in enumerate(metric_logger.log_every(self.train_loader, print_freq, header)):
|
|
|
- ni = iter_i + self.epoch * epoch_size
|
|
|
- # Warmup
|
|
|
- if ni <= nw:
|
|
|
- xi = [0, nw] # x interp
|
|
|
- for j, x in enumerate(self.optimizer.param_groups):
|
|
|
- # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
|
|
|
- x['lr'] = np.interp(
|
|
|
- ni, xi, [self.warmup_dict['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * self.lf(self.epoch)])
|
|
|
- if 'momentum' in x:
|
|
|
- x['momentum'] = np.interp(ni, xi, [self.warmup_dict['warmup_momentum'], self.optimizer_dict['momentum']])
|
|
|
-
|
|
|
- # To device
|
|
|
- images = images.to(self.device, non_blocking=True).float()
|
|
|
-
|
|
|
- # Multi scale
|
|
|
- if self.args.multi_scale:
|
|
|
- images, targets, img_size = self.rescale_image_targets(
|
|
|
- images, targets, self.model_cfg['stride'], self.args.min_box_size, self.model_cfg['multi_scale'])
|
|
|
- else:
|
|
|
- targets = self.refine_targets(targets, self.args.min_box_size)
|
|
|
-
|
|
|
- # Visualize train targets
|
|
|
- if self.args.vis_tgt:
|
|
|
- vis_data(images*255, targets)
|
|
|
-
|
|
|
- # Inference
|
|
|
- with torch.cuda.amp.autocast(enabled=self.args.fp16):
|
|
|
- outputs = model(images)
|
|
|
- # Compute loss
|
|
|
- loss_dict = self.criterion(outputs=outputs, targets=targets, epoch=self.epoch)
|
|
|
- losses = loss_dict['losses']
|
|
|
- # Grad Accumulate
|
|
|
- if self.grad_accumulate > 1:
|
|
|
- losses /= self.grad_accumulate
|
|
|
-
|
|
|
- loss_dict_reduced = distributed_utils.reduce_dict(loss_dict)
|
|
|
-
|
|
|
- # Backward
|
|
|
- self.scaler.scale(losses).backward()
|
|
|
-
|
|
|
- # Optimize
|
|
|
- if ni % self.grad_accumulate == 0:
|
|
|
- grad_norm = None
|
|
|
- if self.clip_grad > 0:
|
|
|
- # unscale gradients
|
|
|
- self.scaler.unscale_(self.optimizer)
|
|
|
- # clip gradients
|
|
|
- grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=self.clip_grad)
|
|
|
- # optimizer.step
|
|
|
- self.scaler.step(self.optimizer)
|
|
|
- self.scaler.update()
|
|
|
- self.optimizer.zero_grad()
|
|
|
- # ema
|
|
|
- if self.model_ema is not None:
|
|
|
- self.model_ema.update(model)
|
|
|
-
|
|
|
- # Update log
|
|
|
- metric_logger.update(**loss_dict_reduced)
|
|
|
- metric_logger.update(lr=self.optimizer.param_groups[2]["lr"])
|
|
|
- metric_logger.update(grad_norm=grad_norm)
|
|
|
- metric_logger.update(size=img_size)
|
|
|
-
|
|
|
- if self.args.debug:
|
|
|
- print("For debug mode, we only train 1 iteration")
|
|
|
- break
|
|
|
-
|
|
|
- # LR Schedule
|
|
|
- if not self.second_stage:
|
|
|
- self.lr_scheduler.step()
|
|
|
-
|
|
|
- # Gather the stats from all processes
|
|
|
- metric_logger.synchronize_between_processes()
|
|
|
- print("Averaged stats:", metric_logger)
|
|
|
-
|
|
|
- def refine_targets(self, targets, min_box_size):
|
|
|
- # rescale targets
|
|
|
- for tgt in targets:
|
|
|
- boxes = tgt["boxes"].clone()
|
|
|
- labels = tgt["labels"].clone()
|
|
|
- # refine tgt
|
|
|
- tgt_boxes_wh = boxes[..., 2:] - boxes[..., :2]
|
|
|
- min_tgt_size = torch.min(tgt_boxes_wh, dim=-1)[0]
|
|
|
- keep = (min_tgt_size >= min_box_size)
|
|
|
-
|
|
|
- tgt["boxes"] = boxes[keep]
|
|
|
- tgt["labels"] = labels[keep]
|
|
|
-
|
|
|
- return targets
|
|
|
-
|
|
|
- def rescale_image_targets(self, images, targets, stride, min_box_size, multi_scale_range=[0.5, 1.5]):
|
|
|
- """
|
|
|
- Deployed for Multi scale trick.
|
|
|
- """
|
|
|
- if isinstance(stride, int):
|
|
|
- max_stride = stride
|
|
|
- elif isinstance(stride, list):
|
|
|
- max_stride = max(stride)
|
|
|
-
|
|
|
- # During training phase, the shape of input image is square.
|
|
|
- old_img_size = images.shape[-1]
|
|
|
- new_img_size = random.randrange(old_img_size * multi_scale_range[0], old_img_size * multi_scale_range[1] + max_stride)
|
|
|
- new_img_size = new_img_size // max_stride * max_stride # size
|
|
|
- if new_img_size / old_img_size != 1:
|
|
|
- # interpolate
|
|
|
- images = torch.nn.functional.interpolate(
|
|
|
- input=images,
|
|
|
- size=new_img_size,
|
|
|
- mode='bilinear',
|
|
|
- align_corners=False)
|
|
|
- # rescale targets
|
|
|
- for tgt in targets:
|
|
|
- boxes = tgt["boxes"].clone()
|
|
|
- labels = tgt["labels"].clone()
|
|
|
- boxes = torch.clamp(boxes, 0, old_img_size)
|
|
|
- # rescale box
|
|
|
- boxes[:, [0, 2]] = boxes[:, [0, 2]] / old_img_size * new_img_size
|
|
|
- boxes[:, [1, 3]] = boxes[:, [1, 3]] / old_img_size * new_img_size
|
|
|
- # refine tgt
|
|
|
- tgt_boxes_wh = boxes[..., 2:] - boxes[..., :2]
|
|
|
- min_tgt_size = torch.min(tgt_boxes_wh, dim=-1)[0]
|
|
|
- keep = (min_tgt_size >= min_box_size)
|
|
|
-
|
|
|
- tgt["boxes"] = boxes[keep]
|
|
|
- tgt["labels"] = labels[keep]
|
|
|
-
|
|
|
- return images, targets, new_img_size
|
|
|
-
|
|
|
- def check_second_stage(self):
|
|
|
- # set second stage
|
|
|
- print('============== Second stage of Training ==============')
|
|
|
- self.second_stage = True
|
|
|
-
|
|
|
- # close mosaic augmentation
|
|
|
- if self.train_loader.dataset.mosaic_prob > 0.:
|
|
|
- print(' - Close < Mosaic Augmentation > ...')
|
|
|
- self.train_loader.dataset.mosaic_prob = 0.
|
|
|
- self.heavy_eval = True
|
|
|
-
|
|
|
- # close mixup augmentation
|
|
|
- if self.train_loader.dataset.mixup_prob > 0.:
|
|
|
- print(' - Close < Mixup Augmentation > ...')
|
|
|
- self.train_loader.dataset.mixup_prob = 0.
|
|
|
- self.heavy_eval = True
|
|
|
-
|
|
|
- # close rotation augmentation
|
|
|
- if 'degrees' in self.trans_cfg.keys() and self.trans_cfg['degrees'] > 0.0:
|
|
|
- print(' - Close < degress of rotation > ...')
|
|
|
- self.trans_cfg['degrees'] = 0.0
|
|
|
- if 'shear' in self.trans_cfg.keys() and self.trans_cfg['shear'] > 0.0:
|
|
|
- print(' - Close < shear of rotation >...')
|
|
|
- self.trans_cfg['shear'] = 0.0
|
|
|
- if 'perspective' in self.trans_cfg.keys() and self.trans_cfg['perspective'] > 0.0:
|
|
|
- print(' - Close < perspective of rotation > ...')
|
|
|
- self.trans_cfg['perspective'] = 0.0
|
|
|
-
|
|
|
- # build a new transform for second stage
|
|
|
- print(' - Rebuild transforms ...')
|
|
|
- self.train_transform, self.trans_cfg = build_transform(
|
|
|
- args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
- self.train_loader.dataset.transform = self.train_transform
|
|
|
-
|
|
|
- def check_third_stage(self):
|
|
|
- # set third stage
|
|
|
- print('============== Third stage of Training ==============')
|
|
|
- self.third_stage = True
|
|
|
-
|
|
|
- # close random affine
|
|
|
- if 'translate' in self.trans_cfg.keys() and self.trans_cfg['translate'] > 0.0:
|
|
|
- print(' - Close < translate of affine > ...')
|
|
|
- self.trans_cfg['translate'] = 0.0
|
|
|
- if 'scale' in self.trans_cfg.keys():
|
|
|
- print(' - Close < scale of affine >...')
|
|
|
- self.trans_cfg['scale'] = [1.0, 1.0]
|
|
|
-
|
|
|
- # build a new transform for second stage
|
|
|
- print(' - Rebuild transforms ...')
|
|
|
- self.train_transform, self.trans_cfg = build_transform(
|
|
|
- args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
- self.train_loader.dataset.transform = self.train_transform
|
|
|
-
|
|
|
-## Real-time DETR Trainer
|
|
|
-class RTDetrTrainer(object):
|
|
|
- def __init__(self, args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
|
|
|
- # ------------------- Basic parameters -------------------
|
|
|
- self.args = args
|
|
|
- self.epoch = 0
|
|
|
- self.best_map = -1.
|
|
|
- self.device = device
|
|
|
- self.criterion = criterion
|
|
|
- self.world_size = world_size
|
|
|
- self.grad_accumulate = args.grad_accumulate
|
|
|
- self.clip_grad = 0.1
|
|
|
- self.heavy_eval = False
|
|
|
- self.normalize_bbox = True
|
|
|
- # close AMP for RT-DETR
|
|
|
- self.args.fp16 = False
|
|
|
- # weak augmentatino stage
|
|
|
- self.second_stage = False
|
|
|
- self.second_stage_epoch = -1
|
|
|
- # path to save model
|
|
|
- self.path_to_save = os.path.join(args.save_folder, args.dataset, args.model)
|
|
|
- os.makedirs(self.path_to_save, exist_ok=True)
|
|
|
-
|
|
|
- # ---------------------------- Hyperparameters refer to RTMDet ----------------------------
|
|
|
- self.optimizer_dict = {'optimizer': 'adamw', 'momentum': None, 'weight_decay': 0.0001, 'lr0': 0.0001, 'backbone_lr_ratio': 0.1}
|
|
|
- self.warmup_dict = {'warmup': 'linear', 'warmup_iters': 2000, 'warmup_factor': 0.00066667}
|
|
|
- self.lr_schedule_dict = {'lr_scheduler': 'step', 'lr_epoch': [self.args.max_epoch // 12 * 11]}
|
|
|
- self.ema_dict = {'ema_decay': 0.9999, 'ema_tau': 2000}
|
|
|
-
|
|
|
- # ---------------------------- Build Dataset & Model & Trans. Config ----------------------------
|
|
|
- self.data_cfg = data_cfg
|
|
|
- self.model_cfg = model_cfg
|
|
|
- self.trans_cfg = trans_cfg
|
|
|
-
|
|
|
- # ---------------------------- Build Transform ----------------------------
|
|
|
- self.train_transform, self.trans_cfg = build_transform(
|
|
|
- args=args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
- self.val_transform, _ = build_transform(
|
|
|
- args=args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=False)
|
|
|
- if self.trans_cfg["mosaic_prob"] > 0:
|
|
|
- self.second_stage_epoch = 5
|
|
|
-
|
|
|
- # ---------------------------- Build Dataset & Dataloader ----------------------------
|
|
|
- self.dataset, self.dataset_info = build_dataset(args, self.data_cfg, self.trans_cfg, self.train_transform, is_train=True)
|
|
|
- self.train_loader = build_dataloader(args, self.dataset, self.args.batch_size // self.world_size, CollateFunc())
|
|
|
-
|
|
|
- # ---------------------------- Build Evaluator ----------------------------
|
|
|
- self.evaluator = build_evluator(args, self.data_cfg, self.val_transform, self.device)
|
|
|
-
|
|
|
- # ---------------------------- Build Grad. Scaler ----------------------------
|
|
|
- self.scaler = torch.cuda.amp.GradScaler(enabled=self.args.fp16)
|
|
|
-
|
|
|
- # ---------------------------- Build Optimizer ----------------------------
|
|
|
- self.optimizer_dict['lr0'] *= self.args.batch_size / 16. # auto lr scaling
|
|
|
- self.optimizer, self.start_epoch = build_rtdetr_optimizer(self.optimizer_dict, model, self.args.resume)
|
|
|
-
|
|
|
- # ---------------------------- Build LR Scheduler ----------------------------
|
|
|
- self.wp_lr_scheduler = build_wp_lr_scheduler(self.warmup_dict, self.optimizer_dict['lr0'])
|
|
|
- self.lr_scheduler = build_lr_scheduler(self.lr_schedule_dict, self.optimizer, args.resume)
|
|
|
-
|
|
|
- # ---------------------------- Build Model-EMA ----------------------------
|
|
|
- if self.args.ema and distributed_utils.get_rank() in [-1, 0]:
|
|
|
- print('Build ModelEMA ...')
|
|
|
- self.model_ema = ModelEMA(self.ema_dict, model, self.start_epoch * len(self.train_loader))
|
|
|
- else:
|
|
|
- self.model_ema = None
|
|
|
-
|
|
|
- def train(self, model):
|
|
|
- for epoch in range(self.start_epoch, self.args.max_epoch):
|
|
|
- if self.args.distributed:
|
|
|
- self.train_loader.batch_sampler.sampler.set_epoch(epoch)
|
|
|
-
|
|
|
- # check second stage
|
|
|
- if epoch >= (self.args.max_epoch - self.second_stage_epoch - 1) and not self.second_stage:
|
|
|
- self.check_second_stage()
|
|
|
- # save model of the last mosaic epoch
|
|
|
- weight_name = '{}_last_mosaic_epoch.pth'.format(self.args.model)
|
|
|
- checkpoint_path = os.path.join(self.path_to_save, weight_name)
|
|
|
- print('Saving state of the last Mosaic epoch-{}.'.format(self.epoch))
|
|
|
- torch.save({'model': model.state_dict(),
|
|
|
- 'mAP': round(self.evaluator.map*100, 1),
|
|
|
- 'optimizer': self.optimizer.state_dict(),
|
|
|
- 'epoch': self.epoch,
|
|
|
- 'args': self.args},
|
|
|
- checkpoint_path)
|
|
|
-
|
|
|
- # train one epoch
|
|
|
- self.epoch = epoch
|
|
|
- self.train_one_epoch(model)
|
|
|
-
|
|
|
- # eval one epoch
|
|
|
- if self.heavy_eval:
|
|
|
- model_eval = model.module if self.args.distributed else model
|
|
|
- self.eval(model_eval)
|
|
|
- else:
|
|
|
- model_eval = model.module if self.args.distributed else model
|
|
|
- if (epoch % self.args.eval_epoch) == 0 or (epoch == self.args.max_epoch - 1):
|
|
|
- self.eval(model_eval)
|
|
|
-
|
|
|
- if self.args.debug:
|
|
|
- print("For debug mode, we only train 1 epoch")
|
|
|
- break
|
|
|
-
|
|
|
- def eval(self, model):
|
|
|
- # chech model
|
|
|
- model_eval = model if self.model_ema is None else self.model_ema.ema
|
|
|
-
|
|
|
- if distributed_utils.is_main_process():
|
|
|
- # check evaluator
|
|
|
- if self.evaluator is None:
|
|
|
- print('No evaluator ... save model and go on training.')
|
|
|
- print('Saving state, epoch: {}'.format(self.epoch))
|
|
|
- weight_name = '{}_no_eval.pth'.format(self.args.model)
|
|
|
- checkpoint_path = os.path.join(self.path_to_save, weight_name)
|
|
|
- torch.save({'model': model_eval.state_dict(),
|
|
|
- 'mAP': -1.,
|
|
|
- 'optimizer': self.optimizer.state_dict(),
|
|
|
- 'epoch': self.epoch,
|
|
|
- 'args': self.args},
|
|
|
- checkpoint_path)
|
|
|
- else:
|
|
|
- print('eval ...')
|
|
|
- # set eval mode
|
|
|
- model_eval.eval()
|
|
|
-
|
|
|
- # evaluate
|
|
|
- with torch.no_grad():
|
|
|
- self.evaluator.evaluate(model_eval)
|
|
|
-
|
|
|
- # save model
|
|
|
- cur_map = self.evaluator.map
|
|
|
- if cur_map > self.best_map:
|
|
|
- # update best-map
|
|
|
- self.best_map = cur_map
|
|
|
- # save model
|
|
|
- print('Saving state, epoch:', self.epoch)
|
|
|
- weight_name = '{}_best.pth'.format(self.args.model)
|
|
|
- checkpoint_path = os.path.join(self.path_to_save, weight_name)
|
|
|
- torch.save({'model': model_eval.state_dict(),
|
|
|
- 'mAP': round(self.best_map*100, 1),
|
|
|
- 'optimizer': self.optimizer.state_dict(),
|
|
|
- 'epoch': self.epoch,
|
|
|
- 'args': self.args},
|
|
|
- checkpoint_path)
|
|
|
-
|
|
|
- # set train mode.
|
|
|
- model_eval.train()
|
|
|
-
|
|
|
- if self.args.distributed:
|
|
|
- # wait for all processes to synchronize
|
|
|
- dist.barrier()
|
|
|
-
|
|
|
- def train_one_epoch(self, model):
|
|
|
- metric_logger = MetricLogger(delimiter=" ")
|
|
|
- metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
|
|
|
- metric_logger.add_meter('size', SmoothedValue(window_size=1, fmt='{value:d}'))
|
|
|
- metric_logger.add_meter('grad_norm', SmoothedValue(window_size=1, fmt='{value:.1f}'))
|
|
|
- header = 'Epoch: [{} / {}]'.format(self.epoch, self.args.max_epoch)
|
|
|
- epoch_size = len(self.train_loader)
|
|
|
- print_freq = 10
|
|
|
-
|
|
|
- # basic parameters
|
|
|
- epoch_size = len(self.train_loader)
|
|
|
- img_size = self.args.img_size
|
|
|
- nw = self.warmup_dict['warmup_iters']
|
|
|
- lr_warmup_stage = True
|
|
|
-
|
|
|
- # Train one epoch
|
|
|
- for iter_i, (images, targets) in enumerate(metric_logger.log_every(self.train_loader, print_freq, header)):
|
|
|
- ni = iter_i + self.epoch * epoch_size
|
|
|
- # WarmUp
|
|
|
- if ni < nw and lr_warmup_stage:
|
|
|
- self.wp_lr_scheduler(ni, self.optimizer)
|
|
|
- elif ni == nw and lr_warmup_stage:
|
|
|
- print('Warmup stage is over.')
|
|
|
- lr_warmup_stage = False
|
|
|
- self.wp_lr_scheduler.set_lr(self.optimizer, self.optimizer_dict['lr0'], self.optimizer_dict['lr0'])
|
|
|
-
|
|
|
- # To device
|
|
|
- images = images.to(self.device, non_blocking=True).float()
|
|
|
- for tgt in targets:
|
|
|
- tgt['boxes'] = tgt['boxes'].to(self.device)
|
|
|
- tgt['labels'] = tgt['labels'].to(self.device)
|
|
|
-
|
|
|
- # Multi scale
|
|
|
- if self.args.multi_scale:
|
|
|
- images, targets, img_size = self.rescale_image_targets(
|
|
|
- images, targets, self.model_cfg['max_stride'], self.args.min_box_size, self.model_cfg['multi_scale'])
|
|
|
- else:
|
|
|
- targets = self.refine_targets(img_size, targets, self.args.min_box_size)
|
|
|
-
|
|
|
- # xyxy -> cxcywh
|
|
|
- targets = self.box_xyxy_to_cxcywh(targets)
|
|
|
-
|
|
|
- # Visualize train targets
|
|
|
- if self.args.vis_tgt:
|
|
|
- targets = self.box_cxcywh_to_xyxy(targets)
|
|
|
- vis_data(images, targets, normalized_bbox=self.normalize_bbox,
|
|
|
- pixel_mean=self.trans_cfg['pixel_mean'], pixel_std=self.trans_cfg['pixel_std'])
|
|
|
- targets = self.box_xyxy_to_cxcywh(targets)
|
|
|
-
|
|
|
- # Inference
|
|
|
- with torch.cuda.amp.autocast(enabled=self.args.fp16):
|
|
|
- outputs = model(images, targets)
|
|
|
- loss_dict = self.criterion(outputs, targets)
|
|
|
- losses = sum(loss_dict.values())
|
|
|
- # Grad Accumulate
|
|
|
- if self.grad_accumulate > 1:
|
|
|
- losses /= self.grad_accumulate
|
|
|
- loss_dict_reduced = distributed_utils.reduce_dict(loss_dict)
|
|
|
-
|
|
|
- # Backward
|
|
|
- self.scaler.scale(losses).backward()
|
|
|
-
|
|
|
- # Optimize
|
|
|
- if ni % self.grad_accumulate == 0:
|
|
|
- grad_norm = None
|
|
|
- if self.clip_grad > 0:
|
|
|
- # unscale gradients
|
|
|
- self.scaler.unscale_(self.optimizer)
|
|
|
- # clip gradients
|
|
|
- grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=self.clip_grad)
|
|
|
- # optimizer.step
|
|
|
- self.scaler.step(self.optimizer)
|
|
|
- self.scaler.update()
|
|
|
- self.optimizer.zero_grad()
|
|
|
- # ema
|
|
|
- if self.model_ema is not None:
|
|
|
- self.model_ema.update(model)
|
|
|
-
|
|
|
- # Update log
|
|
|
- metric_logger.update(loss=losses.item(), **loss_dict_reduced)
|
|
|
- metric_logger.update(lr=self.optimizer.param_groups[0]["lr"])
|
|
|
- metric_logger.update(grad_norm=grad_norm)
|
|
|
- metric_logger.update(size=img_size)
|
|
|
-
|
|
|
- if self.args.debug:
|
|
|
- print("For debug mode, we only train 1 iteration")
|
|
|
- break
|
|
|
-
|
|
|
- # LR Scheduler
|
|
|
- self.lr_scheduler.step()
|
|
|
-
|
|
|
- def refine_targets(self, img_size, targets, min_box_size):
|
|
|
- # rescale targets
|
|
|
- for tgt in targets:
|
|
|
- boxes = tgt["boxes"].clone()
|
|
|
- labels = tgt["labels"].clone()
|
|
|
- # refine tgt
|
|
|
- tgt_boxes_wh = boxes[..., 2:] - boxes[..., :2]
|
|
|
- min_tgt_size = torch.min(tgt_boxes_wh, dim=-1)[0]
|
|
|
- keep = (min_tgt_size >= min_box_size)
|
|
|
- if self.normalize_bbox:
|
|
|
- # normalize box
|
|
|
- boxes[:, [0, 2]] = boxes[:, [0, 2]] / img_size
|
|
|
- boxes[:, [1, 3]] = boxes[:, [1, 3]] / img_size
|
|
|
-
|
|
|
- tgt["boxes"] = boxes[keep]
|
|
|
- tgt["labels"] = labels[keep]
|
|
|
-
|
|
|
- return targets
|
|
|
-
|
|
|
- def rescale_image_targets(self, images, targets, stride, min_box_size, multi_scale_range=[0.5, 1.5]):
|
|
|
- """
|
|
|
- Deployed for Multi scale trick.
|
|
|
- """
|
|
|
- if isinstance(stride, int):
|
|
|
- max_stride = stride
|
|
|
- elif isinstance(stride, list):
|
|
|
- max_stride = max(stride)
|
|
|
-
|
|
|
- # During training phase, the shape of input image is square.
|
|
|
- old_img_size = images.shape[-1]
|
|
|
- new_img_size = random.randrange(old_img_size * multi_scale_range[0], old_img_size * multi_scale_range[1] + max_stride)
|
|
|
- new_img_size = new_img_size // max_stride * max_stride # size
|
|
|
- if new_img_size / old_img_size != 1:
|
|
|
- # interpolate
|
|
|
- images = torch.nn.functional.interpolate(
|
|
|
- input=images,
|
|
|
- size=new_img_size,
|
|
|
- mode='bilinear',
|
|
|
- align_corners=False)
|
|
|
- # rescale targets
|
|
|
- for tgt in targets:
|
|
|
- boxes = tgt["boxes"].clone()
|
|
|
- labels = tgt["labels"].clone()
|
|
|
- boxes = torch.clamp(boxes, 0, old_img_size)
|
|
|
- # rescale box
|
|
|
- boxes[:, [0, 2]] = boxes[:, [0, 2]] / old_img_size * new_img_size
|
|
|
- boxes[:, [1, 3]] = boxes[:, [1, 3]] / old_img_size * new_img_size
|
|
|
- # refine tgt
|
|
|
- tgt_boxes_wh = boxes[..., 2:] - boxes[..., :2]
|
|
|
- min_tgt_size = torch.min(tgt_boxes_wh, dim=-1)[0]
|
|
|
- keep = (min_tgt_size >= min_box_size)
|
|
|
- if self.normalize_bbox:
|
|
|
- # normalize box
|
|
|
- boxes[:, [0, 2]] = boxes[:, [0, 2]] / new_img_size
|
|
|
- boxes[:, [1, 3]] = boxes[:, [1, 3]] / new_img_size
|
|
|
-
|
|
|
- tgt["boxes"] = boxes[keep]
|
|
|
- tgt["labels"] = labels[keep]
|
|
|
-
|
|
|
- return images, targets, new_img_size
|
|
|
-
|
|
|
- def box_xyxy_to_cxcywh(self, targets):
|
|
|
- # rescale targets
|
|
|
- for tgt in targets:
|
|
|
- boxes_xyxy = tgt["boxes"].clone()
|
|
|
- # rescale box
|
|
|
- cxcy = (boxes_xyxy[..., :2] + boxes_xyxy[..., 2:]) * 0.5
|
|
|
- bwbh = boxes_xyxy[..., 2:] - boxes_xyxy[..., :2]
|
|
|
- boxes_bwbh = torch.cat([cxcy, bwbh], dim=-1)
|
|
|
-
|
|
|
- tgt["boxes"] = boxes_bwbh
|
|
|
-
|
|
|
- return targets
|
|
|
-
|
|
|
- def box_cxcywh_to_xyxy(self, targets):
|
|
|
- # rescale targets
|
|
|
- for tgt in targets:
|
|
|
- boxes_cxcywh = tgt["boxes"].clone()
|
|
|
- # rescale box
|
|
|
- x1y1 = boxes_cxcywh[..., :2] - boxes_cxcywh[..., 2:] * 0.5
|
|
|
- x2y2 = boxes_cxcywh[..., :2] + boxes_cxcywh[..., 2:] * 0.5
|
|
|
- boxes_bwbh = torch.cat([x1y1, x2y2], dim=-1)
|
|
|
-
|
|
|
- tgt["boxes"] = boxes_bwbh
|
|
|
-
|
|
|
- return targets
|
|
|
-
|
|
|
- def check_second_stage(self):
|
|
|
- # set second stage
|
|
|
- print('============== Second stage of Training ==============')
|
|
|
- self.second_stage = True
|
|
|
-
|
|
|
- # close mosaic augmentation
|
|
|
- if self.train_loader.dataset.mosaic_prob > 0.:
|
|
|
- print(' - Close < Mosaic Augmentation > ...')
|
|
|
- self.train_loader.dataset.mosaic_prob = 0.
|
|
|
- self.heavy_eval = True
|
|
|
-
|
|
|
- # close mixup augmentation
|
|
|
- if self.train_loader.dataset.mixup_prob > 0.:
|
|
|
- print(' - Close < Mixup Augmentation > ...')
|
|
|
- self.train_loader.dataset.mixup_prob = 0.
|
|
|
- self.heavy_eval = True
|
|
|
-
|
|
|
- # close rotation augmentation
|
|
|
- if 'degrees' in self.trans_cfg.keys() and self.trans_cfg['degrees'] > 0.0:
|
|
|
- print(' - Close < degress of rotation > ...')
|
|
|
- self.trans_cfg['degrees'] = 0.0
|
|
|
- if 'shear' in self.trans_cfg.keys() and self.trans_cfg['shear'] > 0.0:
|
|
|
- print(' - Close < shear of rotation >...')
|
|
|
- self.trans_cfg['shear'] = 0.0
|
|
|
- if 'perspective' in self.trans_cfg.keys() and self.trans_cfg['perspective'] > 0.0:
|
|
|
- print(' - Close < perspective of rotation > ...')
|
|
|
- self.trans_cfg['perspective'] = 0.0
|
|
|
+ # close random affine
|
|
|
+ if 'translate' in self.trans_cfg.keys() and self.trans_cfg['translate'] > 0.0:
|
|
|
+ print(' - Close < translate of affine > ...')
|
|
|
+ self.trans_cfg['translate'] = 0.0
|
|
|
+ if 'scale' in self.trans_cfg.keys():
|
|
|
+ print(' - Close < scale of affine >...')
|
|
|
+ self.trans_cfg['scale'] = [1.0, 1.0]
|
|
|
|
|
|
# build a new transform for second stage
|
|
|
print(' - Rebuild transforms ...')
|
|
|
self.train_transform, self.trans_cfg = build_transform(
|
|
|
args=self.args, trans_config=self.trans_cfg, max_stride=self.model_cfg['max_stride'], is_train=True)
|
|
|
-
|
|
|
self.train_loader.dataset.transform = self.train_transform
|
|
|
|
|
|
|
|
|
# Build Trainer
|
|
|
def build_trainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size):
|
|
|
# ----------------------- Det trainers -----------------------
|
|
|
- if model_cfg['trainer_type'] == 'yolov8':
|
|
|
- return Yolov8Trainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size)
|
|
|
+ if model_cfg['trainer_type'] == 'yolo':
|
|
|
+ return RTCTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size)
|
|
|
elif model_cfg['trainer_type'] == 'yolox':
|
|
|
return YoloxTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size)
|
|
|
- elif model_cfg['trainer_type'] == 'rtcdet':
|
|
|
- return RTCTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size)
|
|
|
- elif model_cfg['trainer_type'] == 'rtdetr':
|
|
|
- return RTDetrTrainer(args, data_cfg, model_cfg, trans_cfg, device, model, criterion, world_size)
|
|
|
else:
|
|
|
raise NotImplementedError(model_cfg['trainer_type'])
|
|
|
|
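For reference, the optimize step this patch converges on in both remaining trainers is: divide the loss by the accumulation factor, run backward through the AMP GradScaler every iteration, and only unscale, optionally clip (as RTCTrainer does with clip_grad=35), step, and zero the gradients every grad_accumulate iterations. The standalone sketch below illustrates that pattern on a toy model; it is not code from this repository, and the function name run_toy_training and its default values are illustrative assumptions.

# Illustrative sketch (assumption: not part of this repo) of the
# AMP + gradient-accumulation + gradient-clipping optimize step.
import torch

def run_toy_training(grad_accumulate=4, clip_grad=35.0, iters=8, use_amp=False):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = torch.nn.Linear(16, 4).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

    for ni in range(1, iters + 1):
        images = torch.randn(2, 16, device=device)
        targets = torch.randn(2, 4, device=device)

        with torch.cuda.amp.autocast(enabled=use_amp):
            outputs = model(images)
            losses = torch.nn.functional.mse_loss(outputs, targets)

        # Scale the loss so gradients summed over `grad_accumulate`
        # micro-batches match a single large-batch step.
        if grad_accumulate > 1:
            losses = losses / grad_accumulate
        scaler.scale(losses).backward()

        # Step only every `grad_accumulate` iterations.
        if ni % grad_accumulate == 0:
            if clip_grad > 0:
                # Gradients must be unscaled before clipping by norm.
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=clip_grad)
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()

if __name__ == '__main__':
    run_toy_training()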