coco_evaluator.py

import json
import os
import contextlib

import torch
from pycocotools.cocoeval import COCOeval

from datasets import build_dataset, build_transform


class COCOAPIEvaluator():
    """COCO AP evaluator: runs a model over val2017/test2017 and reports
    AP50:95 and AP50 via the official pycocotools COCOeval API."""
    def __init__(self, args, cfg, device, testset=False):
        # ----------------- Basic parameters -----------------
        self.ddp_mode = bool(args.distributed)
        self.image_set = 'test2017' if testset else 'val2017'
        self.device = device
        self.testset = testset
        # ----------------- Metrics -----------------
        self.map = 0.
        self.ap50_95 = 0.
        self.ap50 = 0.
        # ----------------- Dataset -----------------
        self.transform = build_transform(cfg, is_train=False)
        self.dataset, self.dataset_info = build_dataset(args, self.transform, is_train=False)

    @torch.no_grad()
    def evaluate(self, model):
        """Run inference over the whole dataset and compute COCO metrics."""
        ids = []
        coco_results = []
        model.eval()
        model.trainable = False
        # start testing
        for index, (image, target) in enumerate(self.dataset):
            if index % 500 == 0:
                print('[Eval: %d / %d]' % (index, len(self.dataset)))
            # image id
            id_ = int(target['image_id'])
            ids.append(id_)
            # inference
            image = image.unsqueeze(0).to(self.device)
            outputs = model(image)
            bboxes, scores, cls_inds = outputs
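            # NOTE (assumption): the model is taken to return xyxy boxes
            # normalized to [0, 1], which is why they are scaled by the
            # original image size below; e.g. a normalized x1 = 0.5 with
            # orig_w = 640 becomes 320 px.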
            # rescale bbox to original image coordinates
            orig_h, orig_w = target["orig_size"].tolist()
            bboxes[..., 0::2] *= orig_w
            bboxes[..., 1::2] *= orig_h
            # reformat results
            for i, box in enumerate(bboxes):
                x1 = float(box[0])
                y1 = float(box[1])
                x2 = float(box[2])
                y2 = float(box[3])
                # map the contiguous class index back to the COCO category id
                label = self.dataset.coco_indexs[int(cls_inds[i])]
                # COCO json format expects [x, y, width, height]
                bbox = [x1, y1, x2 - x1, y2 - y1]
                score = float(scores[i])
                A = {"image_id": id_,
                     "category_id": label,
                     "bbox": bbox,
                     "score": score}
                coco_results.append(A)
        model.train()
        model.trainable = True
        annType = ['segm', 'bbox', 'keypoints']
        # Evaluate the Dt (detection) json against the ground truth
        if len(coco_results) > 0:
            print('evaluating ......')
            cocoGt = self.dataset.coco
            if self.testset:
                # dump detections to disk (e.g. for the test-dev server)
                with open('coco_test-dev.json', 'w') as f:
                    json.dump(coco_results, f)
                cocoDt = cocoGt.loadRes('coco_test-dev.json')
            else:
                # suppress pycocotools prints
                with open(os.devnull, 'w') as devnull:
                    with contextlib.redirect_stdout(devnull):
                        cocoDt = cocoGt.loadRes(coco_results)
            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])
            cocoEval.params.imgIds = ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
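            # cocoEval.stats is the 12-element summary produced by
            # summarize(); by pycocotools convention, stats[0] is
            # AP @ IoU=0.50:0.95 and stats[1] is AP @ IoU=0.50
            # (both over all areas, maxDets=100).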
            # update mAP
            ap50_95, ap50 = cocoEval.stats[0], cocoEval.stats[1]
            print('ap50_95 : ', ap50_95)
            print('ap50 : ', ap50)
            self.map = ap50_95
            self.ap50_95 = ap50_95
            self.ap50 = ap50
            del coco_results
        else:
            print('No coco detection results !')
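

# ----------------------------------------------------------------------
# Usage sketch (illustrative only). `parse_args` and `build_model` are
# hypothetical placeholders for this repo's actual CLI parsing and model
# construction; everything below is an assumption, not part of this file.
# ----------------------------------------------------------------------
# if __name__ == '__main__':
#     args = parse_args()                                  # hypothetical
#     cfg = {}                                             # hypothetical config
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     model = build_model(args, cfg).to(device)            # hypothetical
#     evaluator = COCOAPIEvaluator(args, cfg, device, testset=False)
#     evaluator.evaluate(model)
#     print('AP50:95 = %.4f | AP50 = %.4f' % (evaluator.ap50_95, evaluator.ap50))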