matcher.py

import numpy as np
import torch


class Yolov4Matcher(object):
    def __init__(self, num_classes, num_anchors, anchor_size, iou_thresh):
        self.num_classes = num_classes
        self.num_anchors = num_anchors
        self.iou_thresh = iou_thresh
        self.anchor_boxes = np.array(
            [[0., 0., anchor[0], anchor[1]]
             for anchor in anchor_size]
        )  # [KA, 4]
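    # Note on the anchor layout (inferred from the level decoding in __call__, not stated
    # explicitly in this file): anchor_size is expected to list the anchors of every pyramid
    # level in order, so KA = num_levels * num_anchors and a flat index k decodes to
    # level = k // num_anchors and anchor_idx = k % num_anchors.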
    def compute_iou(self, anchor_boxes, gt_box):
        """
        anchor_boxes : ndarray -> [KA, 4] (cx, cy, bw, bh).
        gt_box : ndarray -> [1, 4] (cx, cy, bw, bh).
        Returns the IoU between the gt box and every anchor box: ndarray -> [KA,].
        """
        # anchors: (cx, cy, bw, bh) -> (x1, y1, x2, y2), [KA, 4]
        anchors = np.zeros_like(anchor_boxes)
        anchors[..., :2] = anchor_boxes[..., :2] - anchor_boxes[..., 2:] * 0.5  # x1y1
        anchors[..., 2:] = anchor_boxes[..., :2] + anchor_boxes[..., 2:] * 0.5  # x2y2
        anchors_area = anchor_boxes[..., 2] * anchor_boxes[..., 3]

        # gt_box: [1, 4] -> [KA, 4]
        gt_box = np.array(gt_box).reshape(-1, 4)
        gt_box = np.repeat(gt_box, anchors.shape[0], axis=0)
        gt_box_ = np.zeros_like(gt_box)
        gt_box_[..., :2] = gt_box[..., :2] - gt_box[..., 2:] * 0.5  # x1y1
        gt_box_[..., 2:] = gt_box[..., :2] + gt_box[..., 2:] * 0.5  # x2y2
        gt_box_area = gt_box[..., 2] * gt_box[..., 3]

        # intersection (clamped at 0 in case the boxes do not overlap)
        inter_w = np.minimum(anchors[:, 2], gt_box_[:, 2]) - \
                  np.maximum(anchors[:, 0], gt_box_[:, 0])
        inter_h = np.minimum(anchors[:, 3], gt_box_[:, 3]) - \
                  np.maximum(anchors[:, 1], gt_box_[:, 1])
        inter_area = np.clip(inter_w, a_min=0., a_max=None) * np.clip(inter_h, a_min=0., a_max=None)

        # union
        union_area = anchors_area + gt_box_area - inter_area

        # iou
        iou = inter_area / union_area
        iou = np.clip(iou, a_min=1e-10, a_max=1.0)

        return iou
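    # Worked example for compute_iou (illustrative values, not from the original file):
    # an anchor (0, 0, 4, 4) and a gt box (0, 0, 2, 6) have corner forms (-2, -2, 2, 2)
    # and (-1, -3, 1, 3); the intersection is 2 * 4 = 8, the union is 16 + 12 - 8 = 20,
    # so the IoU for that anchor is 8 / 20 = 0.4.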
    @torch.no_grad()
    def __call__(self, fmp_sizes, fpn_strides, targets):
        """
        fmp_sizes: (List) [[fmp_h, fmp_w], ...] sizes of the FPN feature maps.
        fpn_strides: (List) [8, 16, 32, ...] strides of the network outputs.
        targets: (List[Dict]) one dict per image: {'boxes': [...],
                                                   'labels': [...],
                                                   'orig_size': ...}
        """
        assert len(fmp_sizes) == len(fpn_strides)

        # prepare empty targets, one tensor per pyramid level
        bs = len(targets)
        gt_objectness = [
            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, 1])
            for (fmp_h, fmp_w) in fmp_sizes
        ]
        gt_classes = [
            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, self.num_classes])
            for (fmp_h, fmp_w) in fmp_sizes
        ]
        gt_bboxes = [
            torch.zeros([bs, fmp_h, fmp_w, self.num_anchors, 4])
            for (fmp_h, fmp_w) in fmp_sizes
        ]

        for batch_index in range(bs):
            targets_per_image = targets[batch_index]
            # [N,]
            tgt_cls = targets_per_image["labels"].numpy()
            # [N, 4]
            tgt_box = targets_per_image['boxes'].numpy()

            for gt_box, gt_label in zip(tgt_box, tgt_cls):
                # get the bbox coords
                x1, y1, x2, y2 = gt_box.tolist()
                # xyxy -> cxcywh
                xc, yc = (x2 + x1) * 0.5, (y2 + y1) * 0.5
                bw, bh = x2 - x1, y2 - y1
                # center the box at the origin for the shape-only IoU with the anchors
                gt_box = [0, 0, bw, bh]

                # check target
                if bw < 1. or bh < 1.:
                    # invalid target
                    continue

                # compute IoU between the gt box and all anchor boxes
                iou = self.compute_iou(self.anchor_boxes, gt_box)
                iou_mask = (iou > self.iou_thresh)

                label_assignment_results = []
                if iou_mask.sum() == 0:
                    # no anchor passes the threshold: assign the anchor with the highest IoU
                    iou_ind = np.argmax(iou)
                    level = iou_ind // self.num_anchors              # pyramid level
                    anchor_idx = iou_ind - level * self.num_anchors  # anchor index within the level
                    # get the corresponding stride
                    stride = fpn_strides[level]
                    # compute the grid cell
                    xc_s = xc / stride
                    yc_s = yc / stride
                    grid_x = int(xc_s)
                    grid_y = int(yc_s)

                    label_assignment_results.append([grid_x, grid_y, level, anchor_idx])
                else:
                    # assign every anchor whose IoU exceeds the threshold
                    for iou_ind, iou_m in enumerate(iou_mask):
                        if iou_m:
                            level = iou_ind // self.num_anchors              # pyramid level
                            anchor_idx = iou_ind - level * self.num_anchors  # anchor index within the level
                            # get the corresponding stride
                            stride = fpn_strides[level]
                            # compute the grid cell
                            xc_s = xc / stride
                            yc_s = yc / stride
                            grid_x = int(xc_s)
                            grid_y = int(yc_s)

                            label_assignment_results.append([grid_x, grid_y, level, anchor_idx])

                # label assignment
                for result in label_assignment_results:
                    grid_x, grid_y, level, anchor_idx = result
                    stride = fpn_strides[level]
                    x1s, y1s = x1 / stride, y1 / stride
                    x2s, y2s = x2 / stride, y2 / stride
                    fmp_h, fmp_w = fmp_sizes[level]

                    # 3x3 center sampling: also mark neighboring cells that fall inside the box
                    for j in range(grid_y - 1, grid_y + 2):
                        for i in range(grid_x - 1, grid_x + 2):
                            is_in_box = (j >= y1s and j < y2s) and (i >= x1s and i < x2s)
                            is_valid = (j >= 0 and j < fmp_h) and (i >= 0 and i < fmp_w)

                            if is_in_box and is_valid:
                                # obj
                                gt_objectness[level][batch_index, j, i, anchor_idx] = 1.0
                                # cls
                                cls_one_hot = torch.zeros(self.num_classes)
                                cls_one_hot[int(gt_label)] = 1.0
                                gt_classes[level][batch_index, j, i, anchor_idx] = cls_one_hot
                                # box (kept in original image coordinates)
                                gt_bboxes[level][batch_index, j, i, anchor_idx] = torch.as_tensor([x1, y1, x2, y2])

        # flatten each level to [B, M, C] and concatenate over all levels
        gt_objectness = torch.cat([gt.view(bs, -1, 1) for gt in gt_objectness], dim=1).float()
        gt_classes = torch.cat([gt.view(bs, -1, self.num_classes) for gt in gt_classes], dim=1).float()
        gt_bboxes = torch.cat([gt.view(bs, -1, 4) for gt in gt_bboxes], dim=1).float()

        return gt_objectness, gt_classes, gt_bboxes
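

# Minimal usage sketch (illustrative only; the anchor sizes, strides, feature-map sizes,
# and the fake target below are assumptions, not values taken from this file):
if __name__ == "__main__":
    anchor_size = [[10, 13], [16, 30], [33, 23],       # P3 anchors
                   [30, 61], [62, 45], [59, 119],      # P4 anchors
                   [116, 90], [156, 198], [373, 326]]  # P5 anchors
    matcher = Yolov4Matcher(num_classes=80, num_anchors=3,
                            anchor_size=anchor_size, iou_thresh=0.5)

    # one fake 640x640 image with a single ground-truth box
    targets = [{
        'boxes': torch.tensor([[100., 120., 300., 360.]]),  # xyxy
        'labels': torch.tensor([17]),
        'orig_size': torch.tensor([640, 640]),
    }]
    fmp_sizes = [(80, 80), (40, 40), (20, 20)]
    fpn_strides = [8, 16, 32]

    gt_obj, gt_cls, gt_box = matcher(fmp_sizes, fpn_strides, targets)
    # M = sum(h * w) over the levels * num_anchors = (6400 + 1600 + 400) * 3 = 25200
    print(gt_obj.shape, gt_cls.shape, gt_box.shape)  # [1, 25200, 1], [1, 25200, 80], [1, 25200, 4]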