customed_evaluator.py

import json
import tempfile

import torch

from dataset.customed import CustomedDataset
from utils.box_ops import rescale_bboxes

try:
    from pycocotools.cocoeval import COCOeval
except ImportError:
    print("It seems that the COCO API (pycocotools) is not installed.")
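
# NOTE: this is an assumption about utils/box_ops.py, not its actual code:
# from its call site below, `rescale_bboxes(bboxes, [orig_w, orig_h], ratio)`
# is expected to map boxes predicted on the resized input back to the
# original resolution, roughly bboxes / ratio followed by clamping the
# x-coordinates to [0, orig_w] and the y-coordinates to [0, orig_h].
# Check box_ops.py for the authoritative implementation.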


class CustomedEvaluator():
    def __init__(self, data_dir, device, image_set='val', transform=None):
        # ----------------- Basic parameters -----------------
        self.image_set = image_set
        self.transform = transform
        self.device = device
        # ----------------- Metrics -----------------
        self.map = 0.
        self.ap50_95 = 0.
        self.ap50 = 0.
        # ----------------- Dataset -----------------
        self.dataset = CustomedDataset(data_dir=data_dir, image_set=image_set)
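
    # NOTE: assumptions inferred from the call sites below: CustomedDataset
    # is expected to expose pull_image(index) -> (HWC numpy image, image id),
    # a `.coco` pycocotools COCO object holding the ground truth, and
    # `class_ids`, which maps contiguous label indices to COCO category ids.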

    @torch.no_grad()
    def evaluate(self, model):
        """
        COCO average precision (AP) evaluation. Runs inference over the whole
        test dataset and evaluates the results with the COCO API.
        Args:
            model : model object
        Returns:
            ap50 (float)    : COCO AP for IoU=0.50
            ap50_95 (float) : COCO AP for IoU=0.50:0.95
        """
        model.eval()
        ids = []
        data_dict = []
        num_images = len(self.dataset)
        print('total number of images: %d' % (num_images))

        # start testing
        for index in range(num_images):  # iterate over every image in the split
            if index % 500 == 0:
                print('[Eval: %d / %d]' % (index, num_images))

            # load an image
            img, id_ = self.dataset.pull_image(index)
            orig_h, orig_w, _ = img.shape

            # preprocess and normalize pixel values to [0, 1]
            x, _, ratio = self.transform(img)
            x = x.unsqueeze(0).to(self.device) / 255.
            id_ = int(id_)
            ids.append(id_)

            # inference
            outputs = model(x)
            bboxes, scores, labels = outputs

            # rescale bboxes back to the original image size
            bboxes = rescale_bboxes(bboxes, [orig_w, orig_h], ratio)

            for i, box in enumerate(bboxes):
                x1 = float(box[0])
                y1 = float(box[1])
                x2 = float(box[2])
                y2 = float(box[3])
                label = self.dataset.class_ids[int(labels[i])]
                bbox = [x1, y1, x2 - x1, y2 - y1]  # COCO boxes are [x, y, w, h]
                score = float(scores[i])  # object score * class score
                A = {"image_id": id_, "category_id": label, "bbox": bbox,
                     "score": score}  # COCO json format
                data_dict.append(A)
        annType = ['segm', 'bbox', 'keypoints']

        # Evaluate the Dt (detection) json by comparing it against the ground truth
        if len(data_dict) > 0:
            print('evaluating ......')
            cocoGt = self.dataset.coco
            # workaround: temporarily write the results to a json file, because
            # pycocotools cannot consume the list of dicts directly on py36.
            _, tmp = tempfile.mkstemp()
            with open(tmp, 'w') as f:
                json.dump(data_dict, f)
            cocoDt = cocoGt.loadRes(tmp)
            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.params.imgIds = ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()

            ap50_95, ap50 = cocoEval.stats[0], cocoEval.stats[1]
            print('ap50_95 : ', ap50_95)
            print('ap50 : ', ap50)
            self.map = ap50_95
            self.ap50_95 = ap50_95
            self.ap50 = ap50

            return ap50, ap50_95
        else:
            return 0, 0
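

# ------------------------------------------------------------------
# Minimal usage sketch. This is illustrative only: `data_dir` is a
# placeholder path, and dummy_transform / DummyModel are hypothetical
# stand-ins for the project's val transform and a trained detector.
# They only mimic the interfaces evaluate() relies on: a transform
# returning (tensor, target, ratio) and a model returning
# (bboxes, scores, labels).
# ------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    def dummy_transform(img):
        # hypothetical transform: HWC uint8 -> CHW float tensor, no resize,
        # so the rescale ratio is simply 1.0
        x = torch.from_numpy(img.astype(np.float32)).permute(2, 0, 1)
        return x, None, 1.0

    class DummyModel(torch.nn.Module):
        # hypothetical detector: emits a single fixed low-score box per image
        def forward(self, x):
            bboxes = np.array([[0., 0., 10., 10.]])
            scores = np.array([0.05])
            labels = np.array([0])
            return bboxes, scores, labels

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    evaluator = CustomedEvaluator(data_dir='path/to/dataset',  # placeholder
                                  device=device,
                                  image_set='val',
                                  transform=dummy_transform)
    ap50, ap50_95 = evaluator.evaluate(DummyModel().to(device))
    print('AP50: %.4f, AP50:95: %.4f' % (ap50, ap50_95))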