@@ -0,0 +1,572 @@
+# ---------------------------------------------------------------------------
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# ---------------------------------------------------------------------------
+import time
+import math
+import datetime
+import numpy as np
+from typing import List
+from thop import profile
+from copy import deepcopy
+from collections import defaultdict, deque
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.distributed as dist
+from torch import Tensor
+from .distributed_utils import is_dist_avail_and_initialized
+
+
+# ---------------------------- Train tools ----------------------------
+class SmoothedValue(object):
+    """Track a series of values and provide access to smoothed values over a
+    window or the global series average.
+    """
+
+    def __init__(self, window_size=20, fmt=None):
+        if fmt is None:
+            fmt = "{median:.4f} ({global_avg:.4f})"
+        self.deque = deque(maxlen=window_size)
+        self.total = 0.0
+        self.count = 0
+        self.fmt = fmt
+
+    def update(self, value, n=1):
+        self.deque.append(value)
+        self.count += n
+        self.total += value * n
+
+    def synchronize_between_processes(self):
+        """
+        Warning: does not synchronize the deque!
+        """
+        if not is_dist_avail_and_initialized():
+            return
+        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
+        dist.barrier()
+        dist.all_reduce(t)
+        t = t.tolist()
+        self.count = int(t[0])
+        self.total = t[1]
+
+    @property
+    def median(self):
+        d = torch.tensor(list(self.deque))
+        return d.median().item()
+
+    @property
+    def avg(self):
+        d = torch.tensor(list(self.deque), dtype=torch.float32)
+        return d.mean().item()
+
+    @property
+    def global_avg(self):
+        return self.total / self.count
+
+    @property
+    def max(self):
+        return max(self.deque)
+
+    @property
+    def value(self):
+        return self.deque[-1]
+
+    def __str__(self):
+        return self.fmt.format(
+            median=self.median,
+            avg=self.avg,
+            global_avg=self.global_avg,
+            max=self.max,
+            value=self.value)
+
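+# A minimal usage sketch for SmoothedValue, kept as a comment so the module
+# stays import-safe (the loss values below are made up for illustration):
+#
+#   meter = SmoothedValue(window_size=20)
+#   for loss in (0.9, 0.7, 0.8):
+#       meter.update(loss)
+#   print(meter)  # -> "0.8000 (0.8000)" with the default fmt
+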
+class MetricLogger(object):
+    def __init__(self, delimiter="\t"):
+        self.meters = defaultdict(SmoothedValue)
+        self.delimiter = delimiter
+
+    def update(self, **kwargs):
+        for k, v in kwargs.items():
+            if isinstance(v, torch.Tensor):
+                v = v.item()
+            assert isinstance(v, (float, int))
+            self.meters[k].update(v)
+
+    def __getattr__(self, attr):
+        if attr in self.meters:
+            return self.meters[attr]
+        if attr in self.__dict__:
+            return self.__dict__[attr]
+        raise AttributeError("'{}' object has no attribute '{}'".format(
+            type(self).__name__, attr))
+
+    def __str__(self):
+        loss_str = []
+        for name, meter in self.meters.items():
+            loss_str.append(
+                "{}: {}".format(name, str(meter))
+            )
+        return self.delimiter.join(loss_str)
+
+    def synchronize_between_processes(self):
+        for meter in self.meters.values():
+            meter.synchronize_between_processes()
+
+    def add_meter(self, name, meter):
+        self.meters[name] = meter
+
+    def log_every(self, iterable, print_freq, header=None):
+        i = 0
+        if not header:
+            header = ''
+        start_time = time.time()
+        end = time.time()
+        iter_time = SmoothedValue(fmt='{avg:.4f}')
+        data_time = SmoothedValue(fmt='{avg:.4f}')
+        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
+        if torch.cuda.is_available():
+            log_msg = self.delimiter.join([
+                header,
+                '[{0' + space_fmt + '}/{1}]',
+                'eta: {eta}',
+                '{meters}',
+                'time: {time}',
+                'data: {data}',
+                'max mem: {memory:.0f}'
+            ])
+        else:
+            log_msg = self.delimiter.join([
+                header,
+                '[{0' + space_fmt + '}/{1}]',
+                'eta: {eta}',
+                '{meters}',
+                'time: {time}',
+                'data: {data}'
+            ])
+        MB = 1024.0 * 1024.0
+        for obj in iterable:
+            data_time.update(time.time() - end)
+            yield obj
+            iter_time.update(time.time() - end)
+            if i % print_freq == 0 or i == len(iterable) - 1:
+                eta_seconds = iter_time.global_avg * (len(iterable) - i)
+                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
+                if torch.cuda.is_available():
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time),
+                        memory=torch.cuda.max_memory_allocated() / MB))
+                else:
+                    print(log_msg.format(
+                        i, len(iterable), eta=eta_string,
+                        meters=str(self),
+                        time=str(iter_time), data=str(data_time)))
+            i += 1
+            end = time.time()
+        total_time = time.time() - start_time
+        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+        print('{} Total time: {} ({:.4f} s / it)'.format(
+            header, total_time_str, total_time / len(iterable)))
+
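+# A minimal usage sketch for MetricLogger in a training loop, kept as a
+# comment (train_loader and its item structure are assumed/hypothetical):
+#
+#   logger = MetricLogger(delimiter="  ")
+#   for images, targets in logger.log_every(train_loader, print_freq=10, header='Epoch: [0]'):
+#       loss = ...  # forward/backward step
+#       logger.update(loss=loss, lr=0.01)
+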
+class SinkhornDistance(torch.nn.Module):
+    def __init__(self, eps=1e-3, max_iter=100, reduction='none'):
+        super(SinkhornDistance, self).__init__()
+        self.eps = eps
+        self.max_iter = max_iter
+        self.reduction = reduction
+
+    def forward(self, mu, nu, C):
+        u = torch.ones_like(mu)
+        v = torch.ones_like(nu)
+
+        # Sinkhorn iterations
+        for i in range(self.max_iter):
+            v = self.eps * (torch.log(nu + 1e-8) -
+                            torch.logsumexp(self.M(C, u, v).transpose(-2, -1), dim=-1)) + v
+            u = self.eps * (torch.log(mu + 1e-8) -
+                            torch.logsumexp(self.M(C, u, v), dim=-1)) + u
+
+        U, V = u, v
+        # Transport plan pi = diag(a) * K * diag(b)
+        pi = torch.exp(self.M(C, U, V)).detach()
+        # Sinkhorn distance
+        cost = torch.sum(pi * C, dim=(-2, -1))
+        return cost, pi
+
+    def M(self, C, u, v):
+        r"""Modified cost for logarithmic updates:
+        $M_{ij} = (-c_{ij} + u_i + v_j) / \epsilon$
+        """
+        return (-C + u.unsqueeze(-1) + v.unsqueeze(-2)) / self.eps
+
+
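+# A minimal usage sketch for SinkhornDistance, kept as a comment. mu and nu
+# are uniform marginals and C is a random cost matrix (all made up here):
+#
+#   sinkhorn = SinkhornDistance(eps=1e-3, max_iter=100)
+#   mu = torch.full((4,), 1.0 / 4)   # source marginal, sums to 1
+#   nu = torch.full((6,), 1.0 / 6)   # target marginal, sums to 1
+#   C = torch.rand(4, 6)             # pairwise transport cost
+#   cost, pi = sinkhorn(mu, nu, C)   # scalar cost and a 4x6 transport plan
+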
+# ---------------------------- Dataloader tools ----------------------------
+def _max_by_axis(the_list):
+    # type: (List[List[int]]) -> List[int]
+    maxes = the_list[0]
+    for sublist in the_list[1:]:
+        for index, item in enumerate(sublist):
+            maxes[index] = max(maxes[index], item)
+    return maxes
+
+def batch_tensor_from_tensor_list(tensor_list: List[Tensor]):
+    # TODO make this more general
+    if tensor_list[0].ndim == 3:
+        # Pad every image to the per-batch maximum size; the mask marks
+        # padded positions with True.
+        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
+        batch_shape = [len(tensor_list)] + max_size
+        b, c, h, w = batch_shape
+        dtype = tensor_list[0].dtype
+        device = tensor_list[0].device
+        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
+        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
+        for img, pad_img, m in zip(tensor_list, tensor, mask):
+            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
+            m[: img.shape[1], : img.shape[2]] = False
+    else:
+        raise ValueError('not supported')
+
+    return tensor, mask
+
+def collate_fn(batch):
+    batch = list(zip(*batch))
+    batch[0] = batch_tensor_from_tensor_list(batch[0])
+
+    return tuple(batch)
+
+
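+# A minimal usage sketch, kept as a comment. It assumes a hypothetical
+# dataset whose items are (image_tensor, target) pairs with 3D CHW images:
+#
+#   from torch.utils.data import DataLoader
+#   loader = DataLoader(dataset, batch_size=2, collate_fn=collate_fn)
+#   (images, masks), targets = next(iter(loader))
+#   # images: [2, 3, H_max, W_max]; masks: [2, H_max, W_max] (True = padding)
+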
+# ---------------------------- For Model ----------------------------
+def match_name_keywords(n, name_keywords):
+    out = False
+    for b in name_keywords:
+        if b in n:
+            out = True
+            break
+    return out
+
+## fuse Conv & BN layer
+def fuse_conv_bn(module):
+    """Recursively fuse conv and bn in a module.
+
+    During inference, batch norm layers no longer update their statistics;
+    only the per-channel running mean and variance are used, which makes it
+    possible to fuse them into the preceding conv layers to save computation
+    and simplify the network structure.
+
+    Args:
+        module (nn.Module): Module to be fused.
+
+    Returns:
+        nn.Module: Fused module.
+    """
+    last_conv = None
+    last_conv_name = None
+
+    def _fuse_conv_bn(conv, bn):
+        """Fuse conv and bn into one module.
+
+        Args:
+            conv (nn.Module): Conv to be fused.
+            bn (nn.Module): BN to be fused.
+
+        Returns:
+            nn.Module: Fused module.
+        """
+        conv_w = conv.weight
+        conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
+            bn.running_mean)
+
+        factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
+        conv.weight = nn.Parameter(conv_w *
+                                   factor.reshape([conv.out_channels, 1, 1, 1]))
+        conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
+        return conv
+
+    for name, child in module.named_children():
+        if isinstance(child,
+                      (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
+            if last_conv is None:  # only fuse BN that is after Conv
+                continue
+            fused_conv = _fuse_conv_bn(last_conv, child)
+            module._modules[last_conv_name] = fused_conv
+            # To reduce changes, set BN as Identity instead of deleting it.
+            module._modules[name] = nn.Identity()
+            last_conv = None
+        elif isinstance(child, nn.Conv2d):
+            last_conv = child
+            last_conv_name = name
+        else:
+            fuse_conv_bn(child)
+    return module
+
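+# The fusion above folds BN's affine transform into the conv weights: with
+# factor = gamma / sqrt(running_var + eps), the fused layer computes
+# factor * (conv(x) - running_mean) + beta, which equals BN(conv(x)) in eval
+# mode. A quick sanity check, kept as a comment:
+#
+#   m = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).eval()
+#   fused = fuse_conv_bn(deepcopy(m))
+#   x = torch.randn(1, 3, 32, 32)
+#   assert torch.allclose(m(x), fused(x), atol=1e-5)
+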
+## compute FLOPs & Parameters
+def compute_flops(model, min_size, max_size, device):
+    if isinstance(min_size[0], list):
+        min_size, max_size = min_size[0]
+    else:
+        min_size = min_size[0]
+
+    x = torch.randn(1, 3, min_size, max_size).to(device)
+    print('==============================')
+    flops, params = profile(model, inputs=(x, ), verbose=False)
+    print('GFLOPs : {:.2f}'.format(flops / 1e9))
+    print('Params : {:.2f} M'.format(params / 1e6))
+
+## load trained weight
+def load_weight(model, path_to_ckpt, fuse_cbn=False):
+    # check ckpt file
+    if path_to_ckpt is None:
+        print('no weight file ...')
+    else:
+        checkpoint = torch.load(path_to_ckpt, map_location='cpu')
+        print('--------------------------------------')
+        print('Best model info:')
+        print('Epoch: {}'.format(checkpoint.pop("epoch")))
+        print('mAP: {}'.format(checkpoint.pop("mAP")))
+        print('--------------------------------------')
+        checkpoint_state_dict = checkpoint.pop("model")
+        model.load_state_dict(checkpoint_state_dict)
+
+        print('Finished loading model!')
+
+    # fuse conv & bn
+    if fuse_cbn:
+        print('Fusing Conv & BN ...')
+        model = fuse_conv_bn(model)
+
+    return model
+
+## total gradient norm (for logging alongside gradient clipping)
+def get_total_grad_norm(parameters, norm_type=2):
+    parameters = list(filter(lambda p: p.grad is not None, parameters))
+    norm_type = float(norm_type)
+    device = parameters[0].grad.device
+    total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]),
+                            norm_type)
+    return total_norm
+
+## param Dict
+def get_param_dict(model, cfg, return_name=False):
+    cfg['lr_backbone'] = cfg['base_lr'] * cfg['backbone_lr_ratio']
+    # sanity check: a parameter must not match lr_backbone_names and
+    # lr_linear_proj_names at the same time
+    for n, p in model.named_parameters():
+        if match_name_keywords(n, cfg['lr_backbone_names']) and match_name_keywords(n, cfg['lr_linear_proj_names']):
+            raise ValueError("parameter '{}' matches both lr_backbone_names and lr_linear_proj_names".format(n))
+
+    param_dicts = [
+        {
+            "params": [
+                p if not return_name else n
+                for n, p in model.named_parameters()
+                if not match_name_keywords(n, cfg['lr_backbone_names'])
+                and not match_name_keywords(n, cfg['lr_linear_proj_names'])
+                and not match_name_keywords(n, cfg['wd_norm_names'])
+                and p.requires_grad
+            ],
+            "lr": cfg['base_lr'],
+            "weight_decay": cfg['weight_decay'],
+        },
+        {
+            "params": [
+                p if not return_name else n
+                for n, p in model.named_parameters()
+                if match_name_keywords(n, cfg['lr_backbone_names'])
+                and not match_name_keywords(n, cfg['lr_linear_proj_names'])
+                and not match_name_keywords(n, cfg['wd_norm_names'])
+                and p.requires_grad
+            ],
+            "lr": cfg['lr_backbone'],
+            "weight_decay": cfg['weight_decay'],
+        },
+        {
+            "params": [
+                p if not return_name else n
+                for n, p in model.named_parameters()
+                if not match_name_keywords(n, cfg['lr_backbone_names'])
+                and match_name_keywords(n, cfg['lr_linear_proj_names'])
+                and not match_name_keywords(n, cfg['wd_norm_names'])
+                and p.requires_grad
+            ],
+            "lr": cfg['base_lr'] * cfg['lr_linear_proj_mult'],
+            "weight_decay": cfg['weight_decay'],
+        },
+        {
+            "params": [
+                p if not return_name else n
+                for n, p in model.named_parameters()
+                if not match_name_keywords(n, cfg['lr_backbone_names'])
+                and not match_name_keywords(n, cfg['lr_linear_proj_names'])
+                and match_name_keywords(n, cfg['wd_norm_names'])
+                and p.requires_grad
+            ],
+            "lr": cfg['base_lr'],
+            "weight_decay": cfg['weight_decay'] * cfg['wd_norm_mult'],
+        },
+        {
+            "params": [
+                p if not return_name else n
+                for n, p in model.named_parameters()
+                if match_name_keywords(n, cfg['lr_backbone_names'])
+                and not match_name_keywords(n, cfg['lr_linear_proj_names'])
+                and match_name_keywords(n, cfg['wd_norm_names'])
+                and p.requires_grad
+            ],
+            "lr": cfg['lr_backbone'],
+            "weight_decay": cfg['weight_decay'] * cfg['wd_norm_mult'],
+        },
+        {
+            "params": [
+                p if not return_name else n
+                for n, p in model.named_parameters()
+                if not match_name_keywords(n, cfg['lr_backbone_names'])
+                and match_name_keywords(n, cfg['lr_linear_proj_names'])
+                and match_name_keywords(n, cfg['wd_norm_names'])
+                and p.requires_grad
+            ],
+            "lr": cfg['base_lr'] * cfg['lr_linear_proj_mult'],
+            "weight_decay": cfg['weight_decay'] * cfg['wd_norm_mult'],
+        },
+    ]
+
+    return param_dicts
+
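+# A hypothetical cfg illustrating the keys get_param_dict reads (the values
+# and keyword lists are assumptions, not the project's actual defaults):
+#
+#   cfg = {
+#       'base_lr': 1e-4,
+#       'backbone_lr_ratio': 0.1,
+#       'weight_decay': 1e-4,
+#       'lr_backbone_names': ['backbone'],
+#       'lr_linear_proj_names': ['reference_points', 'sampling_offsets'],
+#       'lr_linear_proj_mult': 0.1,
+#       'wd_norm_names': ['norm'],
+#       'wd_norm_mult': 0.0,
+#   }
+#   optimizer = torch.optim.AdamW(get_param_dict(model, cfg))
+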
+## Model EMA
+class ModelEMA(object):
+    def __init__(self, cfg, model, updates=0):
+        # Create EMA
+        self.ema = deepcopy(self.de_parallel(model)).eval()  # FP32 EMA
+        self.updates = updates  # number of EMA updates
+        # decay exponential ramp (to help early epochs)
+        self.decay = lambda x: cfg['ema_decay'] * (1 - math.exp(-x / cfg['ema_tau']))
+        for p in self.ema.parameters():
+            p.requires_grad_(False)
+
+    def is_parallel(self, model):
+        # Returns True if model is of type DP or DDP
+        return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
+
+    def de_parallel(self, model):
+        # De-parallelize a model: returns single-GPU model if model is of type DP or DDP
+        return model.module if self.is_parallel(model) else model
+
+    def copy_attr(self, a, b, include=(), exclude=()):
+        # Copy attributes from b to a, with options to only include [...] and to exclude [...]
+        for k, v in b.__dict__.items():
+            if (len(include) and k not in include) or k.startswith('_') or k in exclude:
+                continue
+            else:
+                setattr(a, k, v)
+
+    def update(self, model):
+        # Update EMA parameters
+        self.updates += 1
+        d = self.decay(self.updates)
+
+        msd = self.de_parallel(model).state_dict()  # model state_dict
+        for k, v in self.ema.state_dict().items():
+            if v.dtype.is_floating_point:  # true for FP16 and FP32
+                v *= d
+                v += (1 - d) * msd[k].detach()
+                # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32'
+
+    def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
+        # Update EMA attributes
+        self.copy_attr(self.ema, model, include, exclude)
+
+
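+# A minimal usage sketch for ModelEMA, kept as a comment. 'ema_decay' and
+# 'ema_tau' are the cfg keys __init__ reads; the values here are made up:
+#
+#   ema = ModelEMA({'ema_decay': 0.9998, 'ema_tau': 2000}, model)
+#   for step, batch in enumerate(train_loader):
+#       ...  # forward, backward, optimizer.step()
+#       ema.update(model)
+#   # evaluate with ema.ema instead of model
+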
+# ---------------------------- For Loss ----------------------------
+## focal loss
+def sigmoid_focal_loss(inputs, targets, alpha: float = 0.25, gamma: float = 2):
+    """
+    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
+    Args:
+        inputs: A float tensor of arbitrary shape.
+                The predictions for each example.
+        targets: A float tensor with the same shape as inputs. Stores the binary
+                 classification label for each element in inputs
+                 (0 for the negative class and 1 for the positive class).
+        alpha: (optional) Weighting factor in range (0, 1) to balance
+               positive vs negative examples. A negative value disables the
+               weighting. Default = 0.25.
+        gamma: Exponent of the modulating factor (1 - p_t) to
+               balance easy vs hard examples.
+    Returns:
+        Loss tensor
+    """
+    prob = inputs.sigmoid()
+    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
+    p_t = prob * targets + (1 - prob) * (1 - targets)
+    loss = ce_loss * ((1 - p_t) ** gamma)
+
+    if alpha >= 0:
+        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
+        loss = alpha_t * loss
+
+    return loss
+
+
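+# For a positive target (t = 1), the loss above reduces to
+# -alpha * (1 - p)^gamma * log(p) with p = sigmoid(x), so confident correct
+# predictions (p close to 1) are down-weighted by the (1 - p)^gamma factor.
+# A quick sanity check, kept as a comment:
+#
+#   x = torch.tensor([2.0]); t = torch.tensor([1.0])
+#   loss = sigmoid_focal_loss(x, t)  # small: confident correct prediction
+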
+# ---------------------------- NMS ----------------------------
+def nms(bboxes, scores, nms_thresh):
+    """Pure Python (NumPy) NMS."""
+    x1 = bboxes[:, 0]  # xmin
+    y1 = bboxes[:, 1]  # ymin
+    x2 = bboxes[:, 2]  # xmax
+    y2 = bboxes[:, 3]  # ymax
+
+    areas = (x2 - x1) * (y2 - y1)
+    order = scores.argsort()[::-1]
+
+    keep = []
+    while order.size > 0:
+        i = order[0]
+        keep.append(i)
+        # compute IoU between box i and the remaining boxes
+        xx1 = np.maximum(x1[i], x1[order[1:]])
+        yy1 = np.maximum(y1[i], y1[order[1:]])
+        xx2 = np.minimum(x2[i], x2[order[1:]])
+        yy2 = np.minimum(y2[i], y2[order[1:]])
+
+        w = np.maximum(1e-10, xx2 - xx1)
+        h = np.maximum(1e-10, yy2 - yy1)
+        inter = w * h
+
+        iou = inter / (areas[i] + areas[order[1:]] - inter + 1e-14)
+        # keep only the boxes whose IoU with box i is below the threshold
+        inds = np.where(iou <= nms_thresh)[0]
+        order = order[inds + 1]
+
+    return keep
+
+def multiclass_nms_class_agnostic(scores, labels, bboxes, nms_thresh):
+    # nms
+    keep = nms(bboxes, scores, nms_thresh)
+
+    scores = scores[keep]
+    labels = labels[keep]
+    bboxes = bboxes[keep]
+
+    return scores, labels, bboxes
+
+def multiclass_nms_class_aware(scores, labels, bboxes, nms_thresh, num_classes):
+    # nms
+    keep = np.zeros(len(bboxes), dtype=np.int32)
+    for i in range(num_classes):
+        inds = np.where(labels == i)[0]
+        if len(inds) == 0:
+            continue
+        c_bboxes = bboxes[inds]
+        c_scores = scores[inds]
+        c_keep = nms(c_bboxes, c_scores, nms_thresh)
+        keep[inds[c_keep]] = 1
+
+    keep = np.where(keep > 0)
+    scores = scores[keep]
+    labels = labels[keep]
+    bboxes = bboxes[keep]
+
+    return scores, labels, bboxes
+
+def multiclass_nms(scores, labels, bboxes, nms_thresh, num_classes, class_agnostic=False):
+    if class_agnostic:
+        return multiclass_nms_class_agnostic(scores, labels, bboxes, nms_thresh)
+    else:
+        return multiclass_nms_class_aware(scores, labels, bboxes, nms_thresh, num_classes)
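+
+# A minimal usage sketch for multiclass_nms, kept as a comment (the boxes,
+# scores, and labels below are made-up NumPy arrays in xyxy format):
+#
+#   bboxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]], dtype=np.float32)
+#   scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)
+#   labels = np.array([0, 0, 1], dtype=np.int64)
+#   scores, labels, bboxes = multiclass_nms(scores, labels, bboxes,
+#                                           nms_thresh=0.5, num_classes=2)
+#   # the second box overlaps the first (same class, IoU ~0.68) and is suppressed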