# coco_evaluator.py
import json
import os
import tempfile

import torch
from pycocotools.cocoeval import COCOeval

from dataset.coco import COCODataset
from utils.box_ops import rescale_bboxes
  7. class COCOAPIEvaluator():
  8. def __init__(self, cfg, data_dir, device, transform=None):
  9. # ----------------- Basic parameters -----------------
  10. self.image_set = 'val2017'
  11. self.transform = transform
  12. self.device = device
  13. # ----------------- Metrics -----------------
  14. self.map = 0.
  15. self.ap50_95 = 0.
  16. self.ap50 = 0.
  17. # ----------------- Dataset -----------------
  18. self.dataset = COCODataset(cfg=cfg, data_dir=data_dir, image_set=self.image_set, transform=None, is_train=False)
  19. @torch.no_grad()
  20. def evaluate(self, model):
  21. model.eval()
  22. ids = []
  23. data_dict = []
  24. num_images = len(self.dataset)
  25. print('total number of images: %d' % (num_images))
  26. # start testing
  27. for index in range(num_images): # all the data in val2017
  28. if index % 500 == 0:
  29. print('[Eval: %d / %d]'%(index, num_images))
  30. # load an image
  31. img, id_ = self.dataset.pull_image(index)
  32. orig_h, orig_w, _ = img.shape
  33. orig_size = [orig_w, orig_h]
  34. # preprocess
  35. x, _, ratio = self.transform(img)
  36. x = x.unsqueeze(0).to(self.device)
  37. id_ = int(id_)
  38. ids.append(id_)
  39. # inference
  40. outputs = model(x)
  41. scores = outputs['scores']
  42. labels = outputs['labels']
  43. bboxes = outputs['bboxes']
  44. # rescale bboxes
  45. bboxes = rescale_bboxes(bboxes, orig_size, ratio)
  46. # process outputs
  47. for i, box in enumerate(bboxes):
  48. x1 = float(box[0])
  49. y1 = float(box[1])
  50. x2 = float(box[2])
  51. y2 = float(box[3])
  52. label = self.dataset.class_ids[int(labels[i])]
  53. bbox = [x1, y1, x2 - x1, y2 - y1]
  54. score = float(scores[i]) # object score * class score
  55. A = {"image_id": id_, "category_id": label, "bbox": bbox,
  56. "score": score} # COCO json format
  57. data_dict.append(A)
  58. annType = ['segm', 'bbox', 'keypoints']
  59. # Evaluate the Dt (detection) json comparing with the ground truth
  60. if len(data_dict) > 0:
  61. print('evaluating ......')
  62. cocoGt = self.dataset.coco
  63. # workaround: temporarily write data to json file because pycocotools can't process dict in py36.
  64. _, tmp = tempfile.mkstemp()
  65. json.dump(data_dict, open(tmp, 'w'))
  66. cocoDt = cocoGt.loadRes(tmp)
  67. cocoEval = COCOeval(self.dataset.coco, cocoDt, annType[1])
  68. cocoEval.params.imgIds = ids
  69. cocoEval.evaluate()
  70. cocoEval.accumulate()
  71. cocoEval.summarize()
  72. ap50_95, ap50 = cocoEval.stats[0], cocoEval.stats[1]
  73. print('ap50_95 : ', ap50_95)
  74. print('ap50 : ', ap50)
  75. self.map = ap50_95
  76. self.ap50_95 = ap50_95
  77. self.ap50 = ap50
  78. return ap50, ap50_95
  79. else:
  80. return 0, 0