custom_evaluator.py

import json
import os
import tempfile

import torch

from yolo.dataset.custom import CustomDataset
from utils.box_ops import rescale_bboxes

try:
    from pycocotools.cocoeval import COCOeval
except ImportError:
    print("It seems that the COCO API (pycocotools) is not installed.")


class CustomEvaluator():
    def __init__(self, cfg, data_dir, device, image_set='val', transform=None):
        # ----------------- Basic parameters -----------------
        self.image_set = image_set
        self.transform = transform
        self.device = device
        # ----------------- Metrics -----------------
        self.map = 0.
        self.ap50_95 = 0.
        self.ap50 = 0.
        # ----------------- Dataset -----------------
        # The dataset is built without a transform; preprocessing is applied
        # manually in evaluate() so the original image size stays available.
        self.dataset = CustomDataset(cfg, data_dir=data_dir, image_set=image_set, transform=None, is_train=False)

    @torch.no_grad()
    def evaluate(self, model):
        """
        COCO average precision (AP) evaluation. Runs inference over the whole
        dataset and evaluates the collected detections with the COCO API.

        Args:
            model : model object

        Returns:
            ap50    (float) : COCO AP for IoU=0.50
            ap50_95 (float) : COCO AP averaged over IoU=0.50:0.95
        """
        model.eval()
        ids = []
        data_dict = []
        num_images = len(self.dataset)
        print('total number of images: %d' % num_images)

        # --------------- COCO-style evaluation ---------------
        for index in range(num_images):
            if index % 500 == 0:
                print('[Eval: %d / %d]' % (index, num_images))

            # ----------- Load an image -----------
            img, img_id = self.dataset.pull_image(index)
            orig_h, orig_w, _ = img.shape

            # ----------- Data preprocess -----------
            x, _, ratio = self.transform(img)
            x = x.unsqueeze(0).to(self.device)
            img_id = int(img_id)
            ids.append(img_id)

            # ----------- Model inference -----------
            outputs = model(x)
            scores = outputs['scores']
            labels = outputs['labels']
            bboxes = outputs['bboxes']

            # ----------- Rescale bboxes -----------
            # Map boxes from the resized input back to the original image size.
            bboxes = rescale_bboxes(bboxes, [orig_w, orig_h], ratio)

            # ----------- Process results -----------
            for i, box in enumerate(bboxes):
                x1 = float(box[0])
                y1 = float(box[1])
                x2 = float(box[2])
                y2 = float(box[3])
                label = self.dataset.class_ids[int(labels[i])]

                # COCO box format: [x1, y1, bw, bh]
                bbox = [x1, y1, x2 - x1, y2 - y1]
                score = float(scores[i])

                # COCO json format
                A = {"image_id": img_id,
                     "category_id": label,
                     "bbox": bbox,
                     "score": score}
                data_dict.append(A)

        annType = ['segm', 'bbox', 'keypoints']

        # ------------- COCO box detection evaluation -------------
        if len(data_dict) > 0:
            print('evaluating ......')
            cocoGt = self.dataset.coco
            # Dump detections to a temporary json file for the COCO API.
            fd, tmp = tempfile.mkstemp(suffix='.json')
            os.close(fd)
            with open(tmp, 'w') as f:
                json.dump(data_dict, f)
            cocoDt = cocoGt.loadRes(tmp)
            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.params.imgIds = ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()

            # stats[0] = AP@[0.50:0.95], stats[1] = AP@0.50
            ap50_95, ap50 = cocoEval.stats[0], cocoEval.stats[1]
            print('ap50_95 : ', ap50_95)
            print('ap50 : ', ap50)
            self.map = ap50_95
            self.ap50_95 = ap50_95
            self.ap50 = ap50

            return ap50, ap50_95
        else:
            return 0, 0
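

# NOTE: rescale_bboxes is imported from utils.box_ops and its body is not
# shown in this file. The sketch below is a plausible illustration only, NOT
# the repo's implementation: it assumes `ratio` is a single scalar resize
# factor and that boxes should be clamped to the original image bounds. It is
# deliberately named *_sketch so it does not shadow the real import above.
def rescale_bboxes_sketch(bboxes, origin_size, ratio):
    # bboxes: [N, 4] tensor of (x1, y1, x2, y2) on the resized model input.
    # origin_size: [orig_w, orig_h] of the original image.
    # ratio: assumed scalar scale applied by the preprocessing transform.
    bboxes = bboxes / ratio
    bboxes[..., [0, 2]] = bboxes[..., [0, 2]].clamp(0., origin_size[0])
    bboxes[..., [1, 3]] = bboxes[..., [1, 3]].clamp(0., origin_size[1])
    return bboxes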
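

if __name__ == '__main__':
    # Minimal usage sketch. `cfg`, `my_val_transform`, and `MyDetector` are
    # hypothetical placeholders (they are not defined in this repo snippet):
    # evaluate() only assumes that transform(img) -> (tensor, target, ratio)
    # and that model(x) -> {'scores': ..., 'labels': ..., 'bboxes': ...}.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    evaluator = CustomEvaluator(cfg,                       # placeholder config
                                data_dir='path/to/dataset',
                                device=device,
                                image_set='val',
                                transform=my_val_transform)  # placeholder

    model = MyDetector(cfg).to(device)                     # placeholder model
    ap50, ap50_95 = evaluator.evaluate(model)
    print('AP50: %.4f, AP50:95: %.4f' % (ap50, ap50_95))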