# crowdhuman_evaluator.py

import os
import json
import time

import numpy as np
import torch

from dataset.crowdhuman import CrowdHumanDataset
from .crowdhuman_tools import compute_JI, compute_APMR
from utils.box_ops import rescale_bboxes

class CrowdHumanEvaluator():
    """AP / MR / JI evaluator for the CrowdHuman benchmark."""

    def __init__(self, data_dir, device, image_set='val', transform=None):
        """
        Args:
            data_dir (str): dataset root directory.
            device (torch.device): device to run inference on (CUDA or CPU).
            image_set (str): 'train' or 'val'.
            transform (callable): used to preprocess inputs.
        """
        # ----------------- Basic parameters -----------------
        # the annotation file must match the split being evaluated
        self.eval_source = os.path.join(data_dir, 'annotation_{}.odgt'.format(image_set))
        self.image_set = image_set
        self.transform = transform
        self.device = device
        # full timestamp so repeated runs land in distinct directories
        self.evalDir = os.path.join('det_results', 'eval', 'CrowdHuman',
                                    time.strftime("%Y%m%d_%H%M%S"))
        os.makedirs(self.evalDir, exist_ok=True)
        # ----------------- Metrics -----------------
        self.map = 0.
        self.mr = 0.
        self.ji = 0.
        # ----------------- Dataset -----------------
        self.dataset = CrowdHumanDataset(data_dir=data_dir, image_set=image_set)

    def boxes_dump(self, boxes):
        """Convert an [N, 5|6|7] box array into odgt-style dictionaries."""
        if boxes.shape[-1] == 7:
            # [x1, y1, w, h, score, tag, proposal_num]
            result = [{'box': [round(i, 1) for i in box[:4].tolist()],
                       'score': round(float(box[4]), 5),
                       'tag': int(box[5]),
                       'proposal_num': int(box[6])} for box in boxes]
        elif boxes.shape[-1] == 6:
            # [x1, y1, w, h, score, tag] -- predicted boxes
            result = [{'box': [round(i, 1) for i in box[:4].tolist()],
                       'score': round(float(box[4]), 5),
                       'tag': int(box[5])} for box in boxes]
        elif boxes.shape[-1] == 5:
            # [x1, y1, w, h, tag] -- ground-truth boxes
            result = [{'box': [round(i, 1) for i in box[:4].tolist()],
                       'tag': int(box[4])} for box in boxes]
        else:
            raise ValueError('Unknown box dim: {}'.format(boxes.shape[-1]))
        return result
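
    # Illustrative example (not from the original file): a 6-dim detection
    # row [x1, y1, w, h, score, tag] dumps to an odgt-style entry such as
    #   {'box': [12.0, 34.0, 56.0, 78.0], 'score': 0.91234, 'tag': 1}
    # (values made up), matching the record layout of CrowdHuman's
    # annotation files.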

    @torch.no_grad()
    def inference(self, model):
        model.eval()
        all_result_dicts = []
        num_images = len(self.dataset)
        print('total number of images: %d' % num_images)

        # start testing
        for index in range(num_images):  # every image in the split
            if index % 500 == 0:
                print('[Eval: %d / %d]' % (index, num_images))

            # load an image
            img, img_id = self.dataset.pull_image(index)
            orig_h, orig_w, _ = img.shape

            # load the ground truth and append a class tag (1 = person)
            gt_bboxes, gt_labels = self.dataset.pull_anno(index)
            gt_bboxes = np.array(gt_bboxes)[..., :4]  # [N, 4]
            gt_tag = np.ones([gt_bboxes.shape[0], 1], dtype=gt_bboxes.dtype)
            gt_bboxes = np.concatenate([gt_bboxes, gt_tag], axis=-1)

            # preprocess
            x, _, ratio = self.transform(img)
            x = x.unsqueeze(0).to(self.device) / 255.

            # inference
            outputs = model(x)
            scores = outputs['scores']
            labels = outputs['labels']  # unused: CrowdHuman is single-class (person)
            bboxes = outputs['bboxes']

            # rescale bboxes back to the original image size
            bboxes = rescale_bboxes(bboxes, [orig_w, orig_h], ratio)
            pd_tag = np.ones_like(scores)
            pd_bboxes = np.concatenate(
                [bboxes, scores[..., None], pd_tag[..., None]], axis=-1)

            # [x1, y1, x2, y2] -> [x1, y1, bw, bh]
            pd_bboxes[:, 2:4] -= pd_bboxes[:, :2]
            gt_bboxes[:, 2:4] -= gt_bboxes[:, :2]

            result_dict = dict(
                ID=img_id,
                height=int(orig_h),
                width=int(orig_w),
                dtboxes=self.boxes_dump(pd_bboxes.astype(np.float64)),
                gtboxes=self.boxes_dump(gt_bboxes.astype(np.float64))
            )
            all_result_dicts.append(result_dict)

        return all_result_dicts
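
    # Illustrative record (values made up): each element of the returned
    # list serializes in evaluate() to one JSON line of the form
    #   {"ID": "...", "height": 800, "width": 1200,
    #    "dtboxes": [{"box": [...], "score": 0.97215, "tag": 1}, ...],
    #    "gtboxes": [{"box": [...], "tag": 1}, ...]}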

    @torch.no_grad()
    def evaluate(self, model):
        # inference
        all_results = self.inference(model)

        # save results as JSON lines, one record per image
        fpath = os.path.join(self.evalDir, 'dump-{}.json'.format('yolo_free'))
        with open(fpath, 'w') as fid:
            for db in all_results:
                fid.write(json.dumps(db) + '\n')

        # evaluation: JI first, then AP / MR against the source annotations
        eval_path = os.path.join(self.evalDir, 'eval-{}.json'.format('yolo_free'))
        with open(eval_path, 'w') as eval_fid:
            res_line, JI = compute_JI.evaluation_all(fpath, 'box')
            for line in res_line:
                eval_fid.write(line + '\n')
            AP, MR = compute_APMR.compute_APMR(fpath, self.eval_source, 'box')
            line = 'AP:{:.4f}, MR:{:.4f}, JI:{:.4f}.'.format(AP, MR, JI)
            print(line)
            eval_fid.write(line + '\n')

        self.map = AP
        self.mr = MR
        self.ji = JI
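
# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original file). It
# assumes a detector matching the interface used above, i.e. a callable
# that returns a dict with 'scores', 'labels' and 'bboxes', plus a
# `transform` returning (tensor, target, ratio); `my_model` and
# `my_val_transform` are hypothetical placeholders.
#
#   evaluator = CrowdHumanEvaluator(
#       data_dir='path/to/CrowdHuman',
#       device=torch.device('cuda'),
#       image_set='val',
#       transform=my_val_transform,
#   )
#   evaluator.evaluate(my_model)
#   print(evaluator.map, evaluator.mr, evaluator.ji)
# ----------------------------------------------------------------------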