# map_evaluator.py
import json
import os
import tempfile

import torch
from pycocotools.cocoeval import COCOeval

from dataset.coco import COCODataset
from dataset.voc import VOCDataset
from utils.box_ops import rescale_bboxes
  8. class MapEvaluator():
  9. def __init__(self, dataset_name, cfg, data_dir, device, transform=None):
  10. # ----------------- Basic parameters -----------------
  11. self.transform = transform
  12. self.device = device
  13. # ----------------- Metrics -----------------
  14. self.map = 0.
  15. self.ap50_95 = 0.
  16. self.ap50 = 0.
  17. # ----------------- Dataset -----------------
  18. if dataset_name == "coco":
  19. self.dataset = COCODataset(cfg=cfg, data_dir=data_dir, transform=None, is_train=False)
  20. elif dataset_name == "voc":
  21. self.dataset = VOCDataset(cfg=cfg, data_dir=data_dir, transform=None, is_train=False)
  22. else:
  23. raise NotImplementedError("Unknown dataset name.")
  24. @torch.no_grad()
  25. def evaluate(self, model):
  26. model.eval()
  27. ids = []
  28. data_dict = []
  29. num_images = len(self.dataset)
  30. print('total number of images: %d' % (num_images))
  31. # --------------- COCO evaluation ---------------
  32. for index in range(num_images):
  33. if index % 500 == 0:
  34. print('[Eval: %d / %d]'%(index, num_images))
  35. # ----------- Load an image -----------
  36. img, img_id = self.dataset.pull_image(index)
  37. orig_h, orig_w, _ = img.shape
  38. orig_size = [orig_w, orig_h]
  39. # ----------- Data preprocess -----------
  40. x, _, ratio = self.transform(img)
  41. x = x.unsqueeze(0).to(self.device)
  42. img_id = int(img_id)
  43. ids.append(img_id)
  44. # ----------- Model inference -----------
  45. outputs = model(x)
  46. scores = outputs['scores']
  47. labels = outputs['labels']
  48. bboxes = outputs['bboxes']
  49. # ----------- Rescale bboxes -----------
  50. bboxes = rescale_bboxes(bboxes, orig_size, ratio)
  51. # ----------- Process results -----------
  52. for i, box in enumerate(bboxes):
  53. x1 = float(box[0])
  54. y1 = float(box[1])
  55. x2 = float(box[2])
  56. y2 = float(box[3])
  57. label = self.dataset.class_ids[int(labels[i])]
  58. # COCO box format: x1, y1, bw, bh
  59. bbox = [x1, y1, x2 - x1, y2 - y1]
  60. score = float(scores[i])
  61. # COCO json format
  62. A = {"image_id": img_id,
  63. "category_id": label,
  64. "bbox": bbox,
  65. "score": score}
  66. data_dict.append(A)
  67. annType = ['segm', 'bbox', 'keypoints']
  68. # ------------- COCO Box detection evaluation -------------
  69. if len(data_dict) > 0:
  70. print('evaluating ......')
  71. cocoGt = self.dataset.coco
  72. _, tmp = tempfile.mkstemp()
  73. json.dump(data_dict, open(tmp, 'w'))
  74. cocoDt = cocoGt.loadRes(tmp)
  75. cocoEval = COCOeval(self.dataset.coco, cocoDt, annType[1])
  76. cocoEval.params.imgIds = ids
  77. cocoEval.evaluate()
  78. cocoEval.accumulate()
  79. cocoEval.summarize()
  80. ap50_95, ap50 = cocoEval.stats[0], cocoEval.stats[1]
  81. print('ap50_95 : ', ap50_95)
  82. print('ap50 : ', ap50)
  83. self.map = ap50_95
  84. self.ap50_95 = ap50_95
  85. self.ap50 = ap50
  86. return ap50, ap50_95
  87. else:
  88. return 0, 0