# matcher.py
  1. import numpy as np
  2. import torch
  3. import torch.nn as nn
  4. import torch.nn.functional as F
  5. from utils.box_ops import bbox_iou
  6. # -------------------------- YOLOv5 Assigner --------------------------
  7. class Yolov5Matcher(object):
  8. def __init__(self, num_classes, num_anchors, anchor_size, anchor_theshold):
  9. self.num_classes = num_classes
  10. self.num_anchors = num_anchors
  11. self.anchor_theshold = anchor_theshold
  12. # [KA, 2]
  13. self.anchor_sizes = np.array([[anchor[0], anchor[1]]
  14. for anchor in anchor_size])
  15. # [KA, 4]
  16. self.anchor_boxes = np.array([[0., 0., anchor[0], anchor[1]]
  17. for anchor in anchor_size])
  18. def compute_iou(self, anchor_boxes, gt_box):
  19. """
  20. anchor_boxes : ndarray -> [KA, 4] (cx, cy, bw, bh).
  21. gt_box : ndarray -> [1, 4] (cx, cy, bw, bh).
  22. """
  23. # anchors: [KA, 4]
  24. anchors = np.zeros_like(anchor_boxes)
  25. anchors[..., :2] = anchor_boxes[..., :2] - anchor_boxes[..., 2:] * 0.5 # x1y1
  26. anchors[..., 2:] = anchor_boxes[..., :2] + anchor_boxes[..., 2:] * 0.5 # x2y2
  27. anchors_area = anchor_boxes[..., 2] * anchor_boxes[..., 3]
  28. # gt_box: [1, 4] -> [KA, 4]
  29. gt_box = np.array(gt_box).reshape(-1, 4)
  30. gt_box = np.repeat(gt_box, anchors.shape[0], axis=0)
  31. gt_box_ = np.zeros_like(gt_box)
  32. gt_box_[..., :2] = gt_box[..., :2] - gt_box[..., 2:] * 0.5 # x1y1
  33. gt_box_[..., 2:] = gt_box[..., :2] + gt_box[..., 2:] * 0.5 # x2y2
  34. gt_box_area = np.prod(gt_box[..., 2:] - gt_box[..., :2], axis=1)
  35. # intersection
  36. inter_w = np.minimum(anchors[:, 2], gt_box_[:, 2]) - \
  37. np.maximum(anchors[:, 0], gt_box_[:, 0])
  38. inter_h = np.minimum(anchors[:, 3], gt_box_[:, 3]) - \
  39. np.maximum(anchors[:, 1], gt_box_[:, 1])
  40. inter_area = inter_w * inter_h
  41. # union
  42. union_area = anchors_area + gt_box_area - inter_area
  43. # iou
  44. iou = inter_area / union_area
  45. iou = np.clip(iou, a_min=1e-10, a_max=1.0)
  46. return iou
  47. def iou_assignment(self, ctr_points, gt_box, fpn_strides):
  48. # compute IoU
  49. iou = self.compute_iou(self.anchor_boxes, gt_box)
  50. iou_mask = (iou > 0.5)
  51. label_assignment_results = []
  52. if iou_mask.sum() == 0:
  53. # We assign the anchor box with highest IoU score.
  54. iou_ind = np.argmax(iou)
  55. level = iou_ind // self.num_anchors # pyramid level
  56. anchor_idx = iou_ind - level * self.num_anchors # anchor index
  57. # get the corresponding stride
  58. stride = fpn_strides[level]
  59. # compute the grid cell
  60. xc, yc = ctr_points
  61. xc_s = xc / stride
  62. yc_s = yc / stride
  63. grid_x = int(xc_s)
  64. grid_y = int(yc_s)
  65. label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
  66. else:
  67. for iou_ind, iou_m in enumerate(iou_mask):
  68. if iou_m:
  69. level = iou_ind // self.num_anchors # pyramid level
  70. anchor_idx = iou_ind - level * self.num_anchors # anchor index
  71. # get the corresponding stride
  72. stride = fpn_strides[level]
  73. # compute the gride cell
  74. xc, yc = ctr_points
  75. xc_s = xc / stride
  76. yc_s = yc / stride
  77. grid_x = int(xc_s)
  78. grid_y = int(yc_s)
  79. label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
  80. return label_assignment_results
  81. def aspect_ratio_assignment(self, ctr_points, keeps, fpn_strides):
  82. label_assignment_results = []
  83. for keep_idx, keep in enumerate(keeps):
  84. if keep:
  85. level = keep_idx // self.num_anchors # pyramid level
  86. anchor_idx = keep_idx - level * self.num_anchors # anchor index
  87. # get the corresponding stride
  88. stride = fpn_strides[level]
  89. # compute the gride cell
  90. xc, yc = ctr_points
  91. xc_s = xc / stride
  92. yc_s = yc / stride
  93. grid_x = int(xc_s)
  94. grid_y = int(yc_s)
  95. label_assignment_results.append([grid_x, grid_y, xc_s, yc_s, level, anchor_idx])
  96. return label_assignment_results
  97. @torch.no_grad()
  98. def __call__(self, fmp_sizes, fpn_strides, targets):
  99. """
  100. fmp_size: (List) [fmp_h, fmp_w]
  101. fpn_strides: (List) -> [8, 16, 32, ...] stride of network output.
  102. targets: (Dict) dict{'boxes': [...],
  103. 'labels': [...],
  104. 'orig_size': ...}
  105. """
  106. assert len(fmp_sizes) == len(fpn_strides)
  107. # prepare
  108. bs = len(targets)
  109. gt_objectness = [
  110. torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, 1])
  111. for (fmp_h, fmp_w) in fmp_sizes
  112. ]
  113. gt_classes = [
  114. torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, self.num_classes])
  115. for (fmp_h, fmp_w) in fmp_sizes
  116. ]
  117. gt_bboxes = [
  118. torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, 4])
  119. for (fmp_h, fmp_w) in fmp_sizes
  120. ]
  121. for batch_index in range(bs):
  122. targets_per_image = targets[batch_index]
  123. # [N,]
  124. tgt_cls = targets_per_image["labels"].numpy()
  125. # [N, 4]
  126. tgt_box = targets_per_image['boxes'].numpy()
  127. for gt_box, gt_label in zip(tgt_box, tgt_cls):
  128. # get a bbox coords
  129. x1, y1, x2, y2 = gt_box.tolist()
  130. # xyxy -> cxcywh
  131. xc, yc = (x2 + x1) * 0.5, (y2 + y1) * 0.5
  132. bw, bh = x2 - x1, y2 - y1
  133. gt_box = np.array([[0., 0., bw, bh]])
  134. # check target
  135. if bw < 1. or bh < 1.:
  136. # invalid target
  137. continue
  138. # compute aspect ratio
  139. ratios = gt_box[..., 2:] / self.anchor_sizes
  140. keeps = np.maximum(ratios, 1 / ratios).max(-1) < self.anchor_theshold
  141. if keeps.sum() == 0:
  142. label_assignment_results = self.iou_assignment([xc, yc], gt_box, fpn_strides)
  143. else:
  144. label_assignment_results = self.aspect_ratio_assignment([xc, yc], keeps, fpn_strides)
  145. # label assignment
  146. for result in label_assignment_results:
  147. # assignment
  148. grid_x, grid_y, xc_s, yc_s, level, anchor_idx = result
  149. stride = fpn_strides[level]
  150. fmp_h, fmp_w = fmp_sizes[level]
  151. # coord on the feature
  152. x1s, y1s = x1 / stride, y1 / stride
  153. x2s, y2s = x2 / stride, y2 / stride
  154. # offset
  155. off_x = xc_s - grid_x
  156. off_y = yc_s - grid_y
  157. if off_x <= 0.5 and off_y <= 0.5: # top left
  158. grids = [(grid_x-1, grid_y), (grid_x, grid_y-1), (grid_x, grid_y)]
  159. elif off_x > 0.5 and off_y <= 0.5: # top right
  160. grids = [(grid_x+1, grid_y), (grid_x, grid_y-1), (grid_x, grid_y)]
  161. elif off_x <= 0.5 and off_y > 0.5: # bottom left
  162. grids = [(grid_x-1, grid_y), (grid_x, grid_y+1), (grid_x, grid_y)]
  163. elif off_x > 0.5 and off_y > 0.5: # bottom right
  164. grids = [(grid_x+1, grid_y), (grid_x, grid_y+1), (grid_x, grid_y)]
  165. for (i, j) in grids:
  166. is_in_box = (j >= y1s and j < y2s) and (i >= x1s and i < x2s)
  167. is_valid = (j >= 0 and j < fmp_h) and (i >= 0 and i < fmp_w)
  168. if is_in_box and is_valid:
  169. # obj
  170. gt_objectness[level][batch_index, j, i, anchor_idx] = 1.0
  171. # cls
  172. cls_ont_hot = torch.zeros(self.num_classes)
  173. cls_ont_hot[int(gt_label)] = 1.0
  174. gt_classes[level][batch_index, j, i, anchor_idx] = cls_ont_hot
  175. # box
  176. gt_bboxes[level][batch_index, j, i, anchor_idx] = torch.as_tensor([x1, y1, x2, y2])
  177. # [B, M, C]
  178. gt_objectness = torch.cat([gt.view(bs, -1, 1) for gt in gt_objectness], dim=1).float()
  179. gt_classes = torch.cat([gt.view(bs, -1, self.num_classes) for gt in gt_classes], dim=1).float()
  180. gt_bboxes = torch.cat([gt.view(bs, -1, 4) for gt in gt_bboxes], dim=1).float()
  181. return gt_objectness, gt_classes, gt_bboxes
  182. # -------------------------- Task Aligned Assigner --------------------------
  183. class TaskAlignedAssigner(nn.Module):
  184. def __init__(self,
  185. topk=10,
  186. num_classes=80,
  187. alpha=0.5,
  188. beta=6.0,
  189. eps=1e-9):
  190. super(TaskAlignedAssigner, self).__init__()
  191. self.topk = topk
  192. self.num_classes = num_classes
  193. self.bg_idx = num_classes
  194. self.alpha = alpha
  195. self.beta = beta
  196. self.eps = eps
  197. @torch.no_grad()
  198. def forward(self,
  199. pd_scores,
  200. pd_bboxes,
  201. anc_points,
  202. gt_labels,
  203. gt_bboxes):
  204. """This code referenced to
  205. https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py
  206. Args:
  207. pd_scores (Tensor): shape(bs, num_total_anchors, num_classes)
  208. pd_bboxes (Tensor): shape(bs, num_total_anchors, 4)
  209. anc_points (Tensor): shape(num_total_anchors, 2)
  210. gt_labels (Tensor): shape(bs, n_max_boxes, 1)
  211. gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
  212. Returns:
  213. target_labels (Tensor): shape(bs, num_total_anchors)
  214. target_bboxes (Tensor): shape(bs, num_total_anchors, 4)
  215. target_scores (Tensor): shape(bs, num_total_anchors, num_classes)
  216. fg_mask (Tensor): shape(bs, num_total_anchors)
  217. """
  218. self.bs = pd_scores.size(0)
  219. self.n_max_boxes = gt_bboxes.size(1)
  220. mask_pos, align_metric, overlaps = self.get_pos_mask(
  221. pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points)
  222. target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(
  223. mask_pos, overlaps, self.n_max_boxes)
  224. # assigned target
  225. target_labels, target_bboxes, target_scores = self.get_targets(
  226. gt_labels, gt_bboxes, target_gt_idx, fg_mask)
  227. # normalize
  228. align_metric *= mask_pos
  229. pos_align_metrics = align_metric.amax(axis=-1, keepdim=True) # b, max_num_obj
  230. pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True) # b, max_num_obj
  231. norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1)
  232. target_scores = target_scores * norm_align_metric
  233. return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx
  234. def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points):
  235. # get anchor_align metric, (b, max_num_obj, h*w)
  236. align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, gt_labels, gt_bboxes)
  237. # get in_gts mask, (b, max_num_obj, h*w)
  238. mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes)
  239. # get topk_metric mask, (b, max_num_obj, h*w)
  240. mask_topk = self.select_topk_candidates(align_metric * mask_in_gts)
  241. # merge all mask to a final mask, (b, max_num_obj, h*w)
  242. mask_pos = mask_topk * mask_in_gts
  243. return mask_pos, align_metric, overlaps
  244. def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes):
  245. ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj
  246. ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes) # b, max_num_obj
  247. ind[1] = gt_labels.long().squeeze(-1) # b, max_num_obj
  248. # get the scores of each grid for each gt cls
  249. bbox_scores = pd_scores[ind[0], :, ind[1]] # b, max_num_obj, h*w
  250. overlaps = bbox_iou(gt_bboxes.unsqueeze(2), pd_bboxes.unsqueeze(1), xywh=False,
  251. CIoU=True).squeeze(3).clamp(0)
  252. align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta)
  253. return align_metric, overlaps
  254. def select_topk_candidates(self, metrics, largest=True):
  255. """
  256. Args:
  257. metrics: (b, max_num_obj, h*w).
  258. topk_mask: (b, max_num_obj, topk) or None
  259. """
  260. num_anchors = metrics.shape[-1] # h*w
  261. # (b, max_num_obj, topk)
  262. topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest)
  263. topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).tile([1, 1, self.topk])
  264. # (b, max_num_obj, topk)
  265. topk_idxs[~topk_mask] = 0
  266. # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w)
  267. is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2)
  268. # filter invalid bboxes
  269. is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk)
  270. return is_in_topk.to(metrics.dtype)
  271. def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask):
  272. """
  273. Args:
  274. gt_labels: (b, max_num_obj, 1)
  275. gt_bboxes: (b, max_num_obj, 4)
  276. target_gt_idx: (b, h*w)
  277. fg_mask: (b, h*w)
  278. """
  279. # assigned target labels, (b, 1)
  280. batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None]
  281. target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w)
  282. target_labels = gt_labels.long().flatten()[target_gt_idx] # (b, h*w)
  283. # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w)
  284. target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx]
  285. # assigned target scores
  286. target_labels.clamp(0)
  287. target_scores = F.one_hot(target_labels, self.num_classes) # (b, h*w, 80)
  288. fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes) # (b, h*w, 80)
  289. target_scores = torch.where(fg_scores_mask > 0, target_scores, 0)
  290. return target_labels, target_bboxes, target_scores
  291. # -------------------------- Basic Functions --------------------------
  292. def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9):
  293. """select the positive anchors's center in gt
  294. Args:
  295. xy_centers (Tensor): shape(bs*n_max_boxes, num_total_anchors, 4)
  296. gt_bboxes (Tensor): shape(bs, n_max_boxes, 4)
  297. Return:
  298. (Tensor): shape(bs, n_max_boxes, num_total_anchors)
  299. """
  300. n_anchors = xy_centers.size(0)
  301. bs, n_max_boxes, _ = gt_bboxes.size()
  302. _gt_bboxes = gt_bboxes.reshape([-1, 4])
  303. xy_centers = xy_centers.unsqueeze(0).repeat(bs * n_max_boxes, 1, 1)
  304. gt_bboxes_lt = _gt_bboxes[:, 0:2].unsqueeze(1).repeat(1, n_anchors, 1)
  305. gt_bboxes_rb = _gt_bboxes[:, 2:4].unsqueeze(1).repeat(1, n_anchors, 1)
  306. b_lt = xy_centers - gt_bboxes_lt
  307. b_rb = gt_bboxes_rb - xy_centers
  308. bbox_deltas = torch.cat([b_lt, b_rb], dim=-1)
  309. bbox_deltas = bbox_deltas.reshape([bs, n_max_boxes, n_anchors, -1])
  310. return (bbox_deltas.min(axis=-1)[0] > eps).to(gt_bboxes.dtype)
  311. def select_highest_overlaps(mask_pos, overlaps, n_max_boxes):
  312. """if an anchor box is assigned to multiple gts,
  313. the one with the highest iou will be selected.
  314. Args:
  315. mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
  316. overlaps (Tensor): shape(bs, n_max_boxes, num_total_anchors)
  317. Return:
  318. target_gt_idx (Tensor): shape(bs, num_total_anchors)
  319. fg_mask (Tensor): shape(bs, num_total_anchors)
  320. mask_pos (Tensor): shape(bs, n_max_boxes, num_total_anchors)
  321. """
  322. fg_mask = mask_pos.sum(axis=-2)
  323. if fg_mask.max() > 1:
  324. mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1])
  325. max_overlaps_idx = overlaps.argmax(axis=1)
  326. is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes)
  327. is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype)
  328. mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos)
  329. fg_mask = mask_pos.sum(axis=-2)
  330. target_gt_idx = mask_pos.argmax(axis=-2)
  331. return target_gt_idx, fg_mask , mask_pos
  332. def iou_calculator(box1, box2, eps=1e-9):
  333. """Calculate iou for batch
  334. Args:
  335. box1 (Tensor): shape(bs, n_max_boxes, 1, 4)
  336. box2 (Tensor): shape(bs, 1, num_total_anchors, 4)
  337. Return:
  338. (Tensor): shape(bs, n_max_boxes, num_total_anchors)
  339. """
  340. box1 = box1.unsqueeze(2) # [N, M1, 4] -> [N, M1, 1, 4]
  341. box2 = box2.unsqueeze(1) # [N, M2, 4] -> [N, 1, M2, 4]
  342. px1y1, px2y2 = box1[:, :, :, 0:2], box1[:, :, :, 2:4]
  343. gx1y1, gx2y2 = box2[:, :, :, 0:2], box2[:, :, :, 2:4]
  344. x1y1 = torch.maximum(px1y1, gx1y1)
  345. x2y2 = torch.minimum(px2y2, gx2y2)
  346. overlap = (x2y2 - x1y1).clip(0).prod(-1)
  347. area1 = (px2y2 - px1y1).clip(0).prod(-1)
  348. area2 = (gx2y2 - gx1y1).clip(0).prod(-1)
  349. union = area1 + area2 - overlap + eps
  350. return overlap / union