matcher.py

import torch
import numpy as np


class Yolov1Matcher(object):
    def __init__(self, num_classes):
        self.num_classes = num_classes

    @torch.no_grad()
    def __call__(self, fmp_size, stride, targets):
        """
        fmp_size: (Tuple[int, int]) height and width of the YOLOv1 feature map.
        stride:   (int) stride of the YOLOv1 output.
        targets:  (List[Dict]) per-image dicts {'boxes': [...],
                                                'labels': [...],
                                                'orig_size': ...}
        """
        # prepare
        bs = len(targets)
        fmp_h, fmp_w = fmp_size
        gt_objectness = np.zeros([bs, fmp_h, fmp_w, 1])
        gt_classes = np.zeros([bs, fmp_h, fmp_w, self.num_classes])
        gt_bboxes = np.zeros([bs, fmp_h, fmp_w, 4])

        for batch_index in range(bs):
            targets_per_image = targets[batch_index]
            # [N,]
            tgt_cls = targets_per_image["labels"].numpy()
            # [N, 4]
            tgt_box = targets_per_image["boxes"].numpy()

            for gt_box, gt_label in zip(tgt_box, tgt_cls):
                x1, y1, x2, y2 = gt_box
                # xyxy -> cxcywh
                xc, yc = (x2 + x1) * 0.5, (y2 + y1) * 0.5
                bw, bh = x2 - x1, y2 - y1
                # skip degenerate boxes
                if bw < 1. or bh < 1.:
                    continue

                # grid cell containing the box center
                xs_c = xc / stride
                ys_c = yc / stride
                grid_x = int(xs_c)
                grid_y = int(ys_c)

                if grid_x < fmp_w and grid_y < fmp_h:
                    # objectness
                    gt_objectness[batch_index, grid_y, grid_x] = 1.0
                    # class (one-hot)
                    cls_one_hot = np.zeros(self.num_classes)
                    cls_one_hot[int(gt_label)] = 1.0
                    gt_classes[batch_index, grid_y, grid_x] = cls_one_hot
                    # box (xyxy)
                    gt_bboxes[batch_index, grid_y, grid_x] = np.array([x1, y1, x2, y2])

        # [B, M, C] where M = fmp_h * fmp_w
        gt_objectness = gt_objectness.reshape(bs, -1, 1)
        gt_classes = gt_classes.reshape(bs, -1, self.num_classes)
        gt_bboxes = gt_bboxes.reshape(bs, -1, 4)

        # to tensor
        gt_objectness = torch.from_numpy(gt_objectness).float()
        gt_classes = torch.from_numpy(gt_classes).float()
        gt_bboxes = torch.from_numpy(gt_bboxes).float()

        return gt_objectness, gt_classes, gt_bboxes
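

# --- Minimal usage sketch ---
# Assumptions (not taken from the file above): a 13x13 feature map with
# stride 32, 20 classes, and a single dummy target dict whose keys follow
# the docstring. Values are illustrative only.
if __name__ == "__main__":
    matcher = Yolov1Matcher(num_classes=20)
    dummy_targets = [
        {
            "boxes": torch.tensor([[50., 60., 150., 180.]]),  # xyxy in input-image pixels
            "labels": torch.tensor([3]),
            "orig_size": torch.tensor([416, 416]),
        }
    ]
    obj, cls, box = matcher(fmp_size=(13, 13), stride=32, targets=dummy_targets)
    # Expected shapes: [1, 169, 1], [1, 169, 20], [1, 169, 4]
    print(obj.shape, cls.shape, box.shape)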