
Modify evaluator code

yjh0410 · 1 year ago · commit 3c503e0e14
3 changed files with 36 additions and 40 deletions
  1. evaluator/coco_evaluator.py      +18 -16
  2. evaluator/customed_evaluator.py  +18 -15
  3. evaluator/voc_evaluator.py       +0  -9

+18 -16   evaluator/coco_evaluator.py

@@ -20,7 +20,6 @@ class COCOAPIEvaluator():
         # ----------------- Dataset -----------------
         self.dataset = COCODataset(cfg=cfg, data_dir=data_dir, image_set=self.image_set, transform=None, is_train=False)
 
-
     @torch.no_grad()
     def evaluate(self, model):
         model.eval()
@@ -29,33 +28,33 @@ class COCOAPIEvaluator():
         num_images = len(self.dataset)
         print('total number of images: %d' % (num_images))
 
-        # start testing
-        for index in range(num_images): # all the data in val2017
+        # --------------- COCO evaluation ---------------
+        for index in range(num_images):
             if index % 500 == 0:
                 print('[Eval: %d / %d]'%(index, num_images))
 
-            # load an image
-            img, id_ = self.dataset.pull_image(index)
+            # ----------- Load an image -----------
+            img, img_id = self.dataset.pull_image(index)
             orig_h, orig_w, _ = img.shape
             orig_size = [orig_w, orig_h]
 
-            # preprocess
+            # ----------- Data preprocess -----------
             x, _, ratio = self.transform(img)
             x = x.unsqueeze(0).to(self.device)
             
-            id_ = int(id_)
-            ids.append(id_)
+            img_id = int(img_id)
+            ids.append(img_id)
 
-            # inference
+            # ----------- Model inference -----------
             outputs = model(x)
             scores = outputs['scores']
             labels = outputs['labels']
             bboxes = outputs['bboxes']
 
-            # rescale bboxes
+            # ----------- Rescale bboxes -----------
             bboxes = rescale_bboxes(bboxes, orig_size, ratio)
 
-            # process outputs
+            # ----------- Process results -----------
             for i, box in enumerate(bboxes):
                 x1 = float(box[0])
                 y1 = float(box[1])
@@ -63,19 +62,22 @@ class COCOAPIEvaluator():
                 y2 = float(box[3])
                 label = self.dataset.class_ids[int(labels[i])]
                 
+                # COCO box format: x1, y1, bw, bh
                 bbox = [x1, y1, x2 - x1, y2 - y1]
-                score = float(scores[i]) # object score * class score
-                A = {"image_id": id_, "category_id": label, "bbox": bbox,
-                     "score": score} # COCO json format
+                score = float(scores[i])
+                # COCO json format
+                A = {"image_id":    img_id,
+                     "category_id": label,
+                     "bbox":        bbox,
+                     "score":       score}
                 data_dict.append(A)
 
         annType = ['segm', 'bbox', 'keypoints']
 
-        # Evaluate the Dt (detection) json comparing with the ground truth
+        # ------------- COCO Box detection evaluation -------------
        if len(data_dict) > 0:
             print('evaluating ......')
             cocoGt = self.dataset.coco
-            # workaround: temporarily write data to json file because pycocotools can't process dict in py36.
             _, tmp = tempfile.mkstemp()
             json.dump(data_dict, open(tmp, 'w'))
             cocoDt = cocoGt.loadRes(tmp)
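
For context, a detection-results JSON like the one written to the temp file above is normally scored with pycocotools' COCOeval. Below is a minimal sketch of that standard flow; it is not code from this repository, and the evaluator's own continuation (truncated by the hunk) may differ. The function name and file-path arguments are illustrative only.

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

def evaluate_coco_bbox(ann_file, res_file):
    # ann_file: COCO-format ground-truth annotation JSON (hypothetical path)
    # res_file: JSON list of {"image_id", "category_id", "bbox", "score"} dicts,
    #           i.e. the same format as data_dict dumped above
    cocoGt = COCO(ann_file)
    cocoDt = cocoGt.loadRes(res_file)
    coco_eval = COCOeval(cocoGt, cocoDt, iouType='bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()  # prints AP, AP50, AP75, APs/APm/APl
    return coco_eval.stats[0], coco_eval.stats[1]  # AP@[0.50:0.95], AP@0.50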

+18 -15   evaluator/customed_evaluator.py

@@ -23,7 +23,6 @@ class CustomedEvaluator():
         # ----------------- Dataset -----------------
         self.dataset = CustomedDataset(cfg, data_dir=data_dir, image_set=image_set, transform=None, is_train=False)
 
-
     @torch.no_grad()
     def evaluate(self, model):
         """
@@ -41,31 +40,32 @@ class CustomedEvaluator():
         num_images = len(self.dataset)
         print('total number of images: %d' % (num_images))
 
-        # start testing
-        for index in range(num_images): # all the data in val2017
+        # --------------- COCO-style evaluation ---------------
+        for index in range(num_images):
             if index % 500 == 0:
                 print('[Eval: %d / %d]'%(index, num_images))
 
-            # load an image
-            img, id_ = self.dataset.pull_image(index)
+            # ----------- Load an image -----------
+            img, img_id = self.dataset.pull_image(index)
             orig_h, orig_w, _ = img.shape
 
-            # preprocess
+            # ----------- Data preprocess -----------
             x, _, ratio = self.transform(img)
             x = x.unsqueeze(0).to(self.device)
             
-            id_ = int(id_)
-            ids.append(id_)
+            img_id = int(img_id)
+            ids.append(img_id)
 
-            # inference
+            # ----------- Model inference -----------
             outputs = model(x)
             scores = outputs['scores']
             labels = outputs['labels']
             bboxes = outputs['bboxes']
 
-            # rescale bboxes
+            # ----------- Rescale bboxes -----------
             bboxes = rescale_bboxes(bboxes, [orig_w, orig_h], ratio)
 
+            # ----------- Process results -----------
             for i, box in enumerate(bboxes):
                 x1 = float(box[0])
                 y1 = float(box[1])
@@ -73,19 +73,22 @@ class CustomedEvaluator():
                 y2 = float(box[3])
                 label = self.dataset.class_ids[int(labels[i])]
                 
+                # COCO box format: x1, y1, bw, bh
                 bbox = [x1, y1, x2 - x1, y2 - y1]
-                score = float(scores[i]) # object score * class score
-                A = {"image_id": id_, "category_id": label, "bbox": bbox,
-                     "score": score} # COCO json format
+                score = float(scores[i])
+                # COCO json format
+                A = {"image_id":    img_id,
+                     "category_id": label,
+                     "bbox":        bbox,
+                     "score":       score}
                 data_dict.append(A)
 
         annType = ['segm', 'bbox', 'keypoints']
 
-        # Evaluate the Dt (detection) json comparing with the ground truth
+        # ------------- COCO Box detection evaluation -------------
         if len(data_dict) > 0:
             print('evaluating ......')
             cocoGt = self.dataset.coco
-            # workaround: temporarily write data to json file because pycocotools can't process dict in py36.
             _, tmp = tempfile.mkstemp()
             json.dump(data_dict, open(tmp, 'w'))
             cocoDt = cocoGt.loadRes(tmp)
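
Both evaluators map predictions back to the original image with rescale_bboxes(bboxes, [orig_w, orig_h], ratio). The helper's implementation is not part of this diff; the sketch below is a plausible, hypothetical reconstruction (not the repository's actual code), assuming ratio is the single resize scale factor returned by the transform.

import numpy as np

def rescale_bboxes(bboxes, orig_size, ratio):
    # Hypothetical reconstruction: undo the resize by dividing by the scale
    # ratio, then clip boxes to the original image bounds.
    orig_w, orig_h = orig_size
    bboxes = np.asarray(bboxes, dtype=np.float32) / ratio
    bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]], 0., orig_w)  # x1, x2
    bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]], 0., orig_h)  # y1, y2
    return bboxes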

+0 -9   evaluator/voc_evaluator.py

@@ -51,7 +51,6 @@ class VOCAPIEvaluator():
             image_set=[('2007', set_type)],
             is_train=False)
         
-
     def evaluate(self, net):
         net.eval()
         num_images = len(self.dataset)
@@ -106,7 +105,6 @@ class VOCAPIEvaluator():
 
         print('Mean AP: ', self.map)
   
-
     def parse_rec(self, filename):
         """ Parse a PASCAL VOC xml file """
         tree = ET.parse(filename)
@@ -126,7 +124,6 @@ class VOCAPIEvaluator():
 
         return objects
 
-
     def get_output_dir(self, name, phase):
         """Return the directory where experimental artifacts are placed.
         If the directory does not exist, it is created.
@@ -138,7 +135,6 @@ class VOCAPIEvaluator():
             os.makedirs(filedir, exist_ok=True)
         return filedir
 
-
     def get_voc_results_file_template(self, cls):
         # VOCdevkit/VOC2007/results/det_test_aeroplane.txt
         filename = 'det_' + self.set_type + '_%s.txt' % (cls)
@@ -148,7 +144,6 @@ class VOCAPIEvaluator():
         path = os.path.join(filedir, filename)
         return path
 
-
     def write_voc_results_file(self, all_boxes):
         for cls_ind, cls in enumerate(self.labelmap):
             if self.display:
@@ -166,7 +161,6 @@ class VOCAPIEvaluator():
                                     dets[k, 0] + 1, dets[k, 1] + 1,
                                     dets[k, 2] + 1, dets[k, 3] + 1))
 
-
     def do_python_eval(self, use_07=True):
         cachedir = os.path.join(self.devkit_path, 'annotations_cache')
         aps = []
@@ -205,7 +199,6 @@ class VOCAPIEvaluator():
             self.map = np.mean(aps)
             print('Mean AP = {:.4f}'.format(np.mean(aps)))
 
-
     def voc_ap(self, rec, prec, use_07_metric=True):
         """ ap = voc_ap(rec, prec, [use_07_metric])
         Compute VOC AP given precision and recall.
@@ -239,7 +232,6 @@ class VOCAPIEvaluator():
             ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
         return ap
 
-
     def voc_eval(self, detpath, classname, cachedir, ovthresh=0.5, use_07_metric=True):
         if not os.path.isdir(cachedir):
             os.mkdir(cachedir)
@@ -347,7 +339,6 @@ class VOCAPIEvaluator():
 
         return rec, prec, ap
 
-
     def evaluate_detections(self, box_list):
         self.write_voc_results_file(box_list)
         self.do_python_eval()