import json
import os
import tempfile

import torch

from dataset.widerface import WiderFaceDataset
from utils.box_ops import rescale_bboxes

try:
    from pycocotools.cocoeval import COCOeval
except ImportError:
    print("It seems that the COCO API (pycocotools) is not installed.")
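# Note: if the import above fails, pycocotools can be installed with
# `pip install pycocotools`.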

class WiderFaceEvaluator():
    """
    COCO-style AP evaluation class. All images in the WiderFace
    validation set are processed and evaluated by the COCO API.
    """
    def __init__(self, data_dir, device, image_set='val', transform=None):
        """
        Args:
            data_dir (str): dataset root directory.
            device (torch.device): CUDA or CPU device.
            image_set (str): 'train' or 'val'.
            transform: used to preprocess input images.
        """
        # ----------------- Basic parameters -----------------
        self.image_set = image_set
        self.transform = transform
        self.device = device
        # ----------------- Metrics -----------------
        self.map = 0.
        self.ap50_95 = 0.
        self.ap50 = 0.
        # ----------------- Dataset -----------------
        self.dataset = WiderFaceDataset(data_dir=data_dir, image_set=image_set)
    @torch.no_grad()
    def evaluate(self, model):
        """
        COCO average precision (AP) evaluation. Runs inference over the
        whole dataset and evaluates the results with the COCO API.
        Args:
            model: model object.
        Returns:
            ap50 (float): COCO AP for IoU=0.50.
            ap50_95 (float): COCO AP averaged over IoU=0.50:0.95.
        """
        model.eval()
        ids = []
        data_dict = []
        num_images = len(self.dataset)
        print('total number of images: %d' % num_images)

        # start testing
        for index in range(num_images):
            if index % 500 == 0:
                print('[Eval: %d / %d]' % (index, num_images))

            # load an image
            img, id_ = self.dataset.pull_image(index)
            orig_h, orig_w, _ = img.shape

            # preprocess: resize, then scale pixel values to [0, 1]
            x, _, ratio = self.transform(img)
            x = x.unsqueeze(0).to(self.device) / 255.
            id_ = int(id_)
            ids.append(id_)

            # inference
            outputs = model(x)
            scores = outputs['scores']
            labels = outputs['labels']
            bboxes = outputs['bboxes']

            # rescale bboxes back to the original image size
            bboxes = rescale_bboxes(bboxes, [orig_w, orig_h], ratio)

            for i, box in enumerate(bboxes):
                x1 = float(box[0])
                y1 = float(box[1])
                x2 = float(box[2])
                y2 = float(box[3])
                label = self.dataset.class_ids[int(labels[i])]
                # convert [x1, y1, x2, y2] to COCO format [x, y, w, h]
                bbox = [x1, y1, x2 - x1, y2 - y1]
                score = float(scores[i])  # object score * class score
                A = {"image_id": id_, "category_id": label, "bbox": bbox,
                     "score": score}  # COCO json format
                data_dict.append(A)

        annType = ['segm', 'bbox', 'keypoints']

        # evaluate the Dt (detection) json against the ground truth
        if len(data_dict) > 0:
            print('evaluating ......')
            cocoGt = self.dataset.coco
            # workaround: dump the detections to a temporary json file,
            # since older pycocotools versions cannot load a list of dicts directly.
            fd, tmp = tempfile.mkstemp(suffix='.json')
            with os.fdopen(fd, 'w') as f:
                json.dump(data_dict, f)
            cocoDt = cocoGt.loadRes(tmp)
            os.remove(tmp)
            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.params.imgIds = ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()

            ap50_95, ap50 = cocoEval.stats[0], cocoEval.stats[1]
            print('ap50_95 : ', ap50_95)
            print('ap50 : ', ap50)
            self.map = ap50_95
            self.ap50_95 = ap50_95
            self.ap50 = ap50

            return ap50, ap50_95
        else:
            return 0, 0
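
# --------------------------------------------------------------------------
# Minimal usage sketch. It assumes a detector whose forward pass returns a
# dict with 'scores', 'labels' and 'bboxes', and a transform that returns
# (tensor, target, ratio), as the evaluate() loop above expects.
# `build_model` and `build_transform` are hypothetical helpers, not part of
# this file:
#
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     model = build_model().to(device)            # hypothetical helper
#     transform = build_transform(img_size=640)   # hypothetical helper
#     evaluator = WiderFaceEvaluator(data_dir='path/to/WiderFace',
#                                    device=device, image_set='val',
#                                    transform=transform)
#     ap50, ap50_95 = evaluator.evaluate(model)
# --------------------------------------------------------------------------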