ourdataset_evaluator.py

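"""COCO-style AP evaluator for OurDataset.

Runs a detection model over an evaluation split and scores the predictions
with pycocotools' COCOeval, reporting AP@[0.5:0.95] and AP@0.5.
"""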
import json
import os
import tempfile

import torch

from dataset.ourdataset import OurDataset
from utils.box_ops import rescale_bboxes

try:
    from pycocotools.cocoeval import COCOeval
except ImportError:
    print("It seems that pycocotools (the COCO API) is not installed.")


class OurDatasetEvaluator():
    """
    COCO AP evaluation class.
    All the data in the evaluation split are processed and
    evaluated by the COCO API.
    """
    def __init__(self, data_dir, device, image_set='val', transform=None):
        """
        Args:
            data_dir (str): dataset root directory.
            device (torch.device): device on which inference is run.
            image_set (str): dataset split to evaluate, e.g. 'val'.
            transform: preprocessing transform; expected to return the
                processed image tensor together with the resize/padding
                deltas needed to map boxes back to the original image.
        """
        self.dataset = OurDataset(data_dir=data_dir, image_set=image_set, is_train=False)
        self.image_set = image_set
        self.transform = transform
        self.device = device
        self.map = 0.
        self.ap50_95 = 0.
        self.ap50 = 0.

    @torch.no_grad()
    def evaluate(self, model):
        """
        COCO average precision (AP) evaluation. Runs inference over the
        whole evaluation split and scores the results with the COCO API.

        Args:
            model : model object
        Returns:
            ap50 (float) : calculated COCO AP for IoU=0.5
            ap50_95 (float) : calculated COCO AP for IoU=0.5:0.95
        """
        model.eval()
        ids = []
        data_dict = []
        num_images = len(self.dataset)
        print('total number of images: %d' % (num_images))

        # start testing: iterate over every image in the evaluation split
        for index in range(num_images):
            if index % 500 == 0:
                print('[Eval: %d / %d]' % (index, num_images))

            # load an image
            img, id_ = self.dataset.pull_image(index)
            orig_h, orig_w, _ = img.shape

            # preprocess: transform, add batch dim, move to device, scale to [0, 1]
            x, _, deltas = self.transform(img)
            x = x.unsqueeze(0).to(self.device) / 255.
            id_ = int(id_)
            ids.append(id_)

            # inference
            outputs = model(x)
            bboxes, scores, cls_inds = outputs

            # rescale bboxes from the network input size back to the original image
            origin_img_size = [orig_h, orig_w]
            cur_img_size = [*x.shape[-2:]]
            bboxes = rescale_bboxes(bboxes, origin_img_size, cur_img_size, deltas)

            for i, box in enumerate(bboxes):
                x1 = float(box[0])
                y1 = float(box[1])
                x2 = float(box[2])
                y2 = float(box[3])
                label = self.dataset.class_ids[int(cls_inds[i])]
                # COCO boxes are [x, y, width, height]
                bbox = [x1, y1, x2 - x1, y2 - y1]
                score = float(scores[i])  # object score * class score
                A = {"image_id": id_, "category_id": label, "bbox": bbox,
                     "score": score}  # COCO json format
                data_dict.append(A)
        annType = ['segm', 'bbox', 'keypoints']

        # Evaluate the Dt (detection) json comparing with the ground truth
        if len(data_dict) > 0:
            print('evaluating ......')
            cocoGt = self.dataset.coco
            # workaround: temporarily write the detections to a json file,
            # because pycocotools can't process a dict directly in py36.
            fd, tmp = tempfile.mkstemp(suffix='.json')
            with os.fdopen(fd, 'w') as f:
                json.dump(data_dict, f)
            cocoDt = cocoGt.loadRes(tmp)
            os.remove(tmp)

            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.params.imgIds = ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()

            # stats[0] is AP@[0.5:0.95], stats[1] is AP@0.5
            ap50_95, ap50 = cocoEval.stats[0], cocoEval.stats[1]
            print('ap50_95 : ', ap50_95)
            print('ap50 : ', ap50)
            self.map = ap50_95
            self.ap50_95 = ap50_95
            self.ap50 = ap50

            return ap50, ap50_95
        else:
            return 0, 0
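

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): shows how the
# evaluator is typically wired up. `build_model` and `ValTransform` are
# hypothetical stand-ins for whatever model constructor and eval-time
# transform the surrounding project actually provides.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # assumption: project-specific helpers; replace with the real builders
    from models import build_model                 # hypothetical
    from dataset.transforms import ValTransform    # hypothetical

    model = build_model(num_classes=80).to(device)
    evaluator = OurDatasetEvaluator(
        data_dir='path/to/dataset',
        device=device,
        image_set='val',
        transform=ValTransform(img_size=640),      # hypothetical signature
    )
    ap50, ap50_95 = evaluator.evaluate(model)
    print('AP@0.5 = %.3f, AP@[0.5:0.95] = %.3f' % (ap50, ap50_95))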