customed_evaluator.py

import os
import json
import tempfile

import torch

from dataset.customed import CustomedDataset
from utils.box_ops import rescale_bboxes

try:
    from pycocotools.cocoeval import COCOeval
except ImportError:
    print("It seems that the COCOAPI is not installed.")


class CustomedEvaluator():
    def __init__(self, data_dir, device, image_set='val', transform=None):
        # ----------------- Basic parameters -----------------
        self.image_set = image_set
        self.transform = transform
        self.device = device
        # ----------------- Metrics -----------------
        self.map = 0.
        self.ap50_95 = 0.
        self.ap50 = 0.
        # ----------------- Dataset -----------------
        self.dataset = CustomedDataset(data_dir=data_dir, image_set=image_set)
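
    # Note: CustomedDataset is assumed to expose pull_image(index) ->
    # (image, image_id), a class_ids mapping from label index to COCO
    # category id, and a `coco` ground-truth object; this is inferred
    # from how they are used in evaluate() below.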
    @torch.no_grad()
    def evaluate(self, model):
        """
        COCO average precision (AP) evaluation. Runs inference over the
        evaluation dataset and scores the results with the COCO API.
        Args:
            model : model object
        Returns:
            ap50 (float) : calculated COCO AP for IoU=0.50
            ap50_95 (float) : calculated COCO AP for IoU=0.50:0.95
        """
        model.eval()
        ids = []
        data_dict = []
        num_images = len(self.dataset)
        print('total number of images: %d' % (num_images))

        # start testing
        for index in range(num_images):  # all the data in the eval split
            if index % 500 == 0:
                print('[Eval: %d / %d]' % (index, num_images))

            # load an image
            img, id_ = self.dataset.pull_image(index)
            orig_h, orig_w, _ = img.shape

            # preprocess
            x, _, ratio = self.transform(img)
            x = x.unsqueeze(0).to(self.device) / 255.
            id_ = int(id_)
            ids.append(id_)

            # inference
            outputs = model(x)
            scores = outputs['scores']
            labels = outputs['labels']
            bboxes = outputs['bboxes']

            # rescale bboxes
            bboxes = rescale_bboxes(bboxes, [orig_w, orig_h], ratio)
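            # rescale_bboxes is assumed to map boxes predicted on the
            # resized network input back to the original (orig_w, orig_h)
            # resolution using the ratio returned by the transform.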
            for i, box in enumerate(bboxes):
                x1 = float(box[0])
                y1 = float(box[1])
                x2 = float(box[2])
                y2 = float(box[3])
                label = self.dataset.class_ids[int(labels[i])]
                bbox = [x1, y1, x2 - x1, y2 - y1]  # xyxy -> xywh (COCO format)
                score = float(scores[i])  # object score * class score
                A = {"image_id": id_, "category_id": label, "bbox": bbox,
                     "score": score}  # COCO json format
                data_dict.append(A)
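        # data_dict now holds one entry per detection in the COCO
        # detection-results format, e.g. (values illustrative):
        #   {"image_id": 1, "category_id": 3,
        #    "bbox": [48.6, 22.1, 102.4, 87.9], "score": 0.87}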

        annType = ['segm', 'bbox', 'keypoints']

        # Evaluate the Dt (detection) json against the ground truth
        if len(data_dict) > 0:
            print('evaluating ......')
            cocoGt = self.dataset.coco
            # workaround: temporarily write the results to a json file
            # because pycocotools can't process a dict in py36.
            fd, tmp = tempfile.mkstemp()
            with os.fdopen(fd, 'w') as f:
                json.dump(data_dict, f)
            cocoDt = cocoGt.loadRes(tmp)
            os.remove(tmp)
            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.params.imgIds = ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
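            # In pycocotools' 12-number summary, stats[0] is
            # AP@[IoU=0.50:0.95] and stats[1] is AP@[IoU=0.50].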
            ap50_95, ap50 = cocoEval.stats[0], cocoEval.stats[1]
            print('ap50_95 : ', ap50_95)
            print('ap50 : ', ap50)
            self.map = ap50_95
            self.ap50_95 = ap50_95
            self.ap50 = ap50

            return ap50, ap50_95
        else:
            return 0., 0.
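
# ----------------------------- Usage sketch -----------------------------
# A minimal example of driving the evaluator. `ValTransform` and
# `build_model` are hypothetical stand-ins for this project's validation
# transform and model factory; the paths and parameters are assumptions.
#
# from dataset.transforms import ValTransform   # hypothetical import
# from models import build_model                # hypothetical import
#
# if __name__ == '__main__':
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     model = build_model(num_classes=20).to(device)  # num_classes assumed
#     evaluator = CustomedEvaluator(data_dir='path/to/dataset',
#                                   device=device,
#                                   image_set='val',
#                                   transform=ValTransform(img_size=640))
#     ap50, ap50_95 = evaluator.evaluate(model)
#     print('AP@0.50 = %.4f, AP@[0.50:0.95] = %.4f' % (ap50, ap50_95))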