
update transform

yjh0410 2 years ago
commit 790ee1e557

BIN
config/__pycache__/transform_config.cpython-36.pyc


BIN
config/__pycache__/yolov1_config.cpython-36.pyc


+ 2 - 4
config/transform_config.py

@@ -4,8 +4,8 @@
 yolov5_trans_config = {
     'aug_type': 'yolov5',
     # Pixel mean & std
-    'pixel_mean': [0.406, 0.456, 0.485],
-    'pixel_std': [0.225, 0.224, 0.229],
+    'pixel_mean': [0., 0., 0.],
+    'pixel_std': [1., 1., 1.],
     # Basic Augment
     'degrees': 0.0,
     'translate': 0.2,
@@ -28,8 +28,6 @@ ssd_trans_config = {
     'aug_type': 'ssd',
     'pixel_mean': [0.406, 0.456, 0.485],
     'pixel_std': [0.225, 0.224, 0.229],
-    'mosaic_prob': 0.0,
-    'mixup_prob': 0.0,
     # Mosaic & Mixup
     'mosaic_prob': 0.,
     'mixup_prob': 0.,

+ 1 - 1
config/yolov1_config.py

@@ -2,7 +2,7 @@
 
 yolov1_cfg = {
     # input
-    'trans_type': 'yolov5',
+    'trans_type': 'ssd',
     # loss weight
     'loss_obj_weight': 1.0,
     'loss_cls_weight': 1.0,
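
This one-line switch makes YOLOv1 pick up the ssd-style transform configured above. A plausible sketch of the dispatch that consumes this key (the real build_trans_config may differ; this assumes it simply maps the string to the dicts in config/transform_config.py):

    from config.transform_config import yolov5_trans_config, ssd_trans_config

    def build_trans_config(trans_type='ssd'):
        # Map a model config's 'trans_type' string to a transform config dict.
        if trans_type == 'yolov5':
            return yolov5_trans_config
        elif trans_type == 'ssd':
            return ssd_trans_config
        raise ValueError('unknown trans_type: {}'.format(trans_type))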

BIN
dataset/data_augment/__pycache__/yolov5_augment.cpython-36.pyc


+ 145 - 0
eval.py

@@ -0,0 +1,145 @@
+import argparse
+import os
+
+from copy import deepcopy
+import torch
+
+from evaluator.voc_evaluator import VOCAPIEvaluator
+from evaluator.coco_evaluator import COCOAPIEvaluator
+
+# load transform
+from dataset.data_augment import build_transform
+
+# load some utils
+from utils.misc import load_weight
+from utils.com_flops_params import FLOPs_and_Params
+
+from models import build_model
+from config import build_model_config, build_trans_config
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='YOLO-Tutorial')
+    # basic
+    parser.add_argument('-size', '--img_size', default=416, type=int,
+                        help='the max size of input image')
+    parser.add_argument('--cuda', action='store_true', default=False,
+                        help='Use cuda')
+
+    # model
+    parser.add_argument('-m', '--model', default='yolo_anchor', type=str,
+                        help='build YOLO')
+    parser.add_argument('--weight', default=None,
+                        type=str, help='Trained state_dict file path to open')
+    parser.add_argument('--conf_thresh', default=0.001, type=float,
+                        help='confidence threshold')
+    parser.add_argument('--nms_thresh', default=0.6, type=float,
+                        help='NMS threshold')
+    parser.add_argument('--topk', default=1000, type=int,
+                        help='topk candidates for testing')
+    parser.add_argument("--no_decode", action="store_true", default=False,
+                        help="not decode in inference or yes")
+
+    # dataset
+    parser.add_argument('--root', default='/mnt/share/ssd2/dataset',
+                        help='data root')
+    parser.add_argument('-d', '--dataset', default='coco-val',
+                        help='voc, coco-val, coco-test.')
+    # TTA
+    parser.add_argument('-tta', '--test_aug', action='store_true', default=False,
+                        help='use test augmentation.')
+
+    return parser.parse_args()
+
+
+def voc_test(model, data_dir, device, transform):
+    evaluator = VOCAPIEvaluator(data_dir=data_dir,
+                                device=device,
+                                transform=transform,
+                                display=True)
+
+    # VOC evaluation
+    evaluator.evaluate(model)
+
+
+def coco_test(model, data_dir, device, transform, test=False):
+    if test:
+        # test-dev
+        print('test on test-dev 2017')
+        evaluator = COCOAPIEvaluator(
+                        data_dir=data_dir,
+                        device=device,
+                        testset=True,
+                        transform=transform)
+
+    else:
+        # eval
+        evaluator = COCOAPIEvaluator(
+                        data_dir=data_dir,
+                        device=device,
+                        testset=False,
+                        transform=transform)
+
+    # COCO evaluation
+    evaluator.evaluate(model)
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    # cuda
+    if args.cuda:
+        print('use cuda')
+        device = torch.device("cuda")
+    else:
+        device = torch.device("cpu")
+
+    # dataset
+    if args.dataset == 'voc':
+        print('eval on voc ...')
+        num_classes = 20
+        data_dir = os.path.join(args.root, 'VOCdevkit')
+    elif args.dataset == 'coco-val':
+        print('eval on coco-val ...')
+        num_classes = 80
+        data_dir = os.path.join(args.root, 'COCO')
+    elif args.dataset == 'coco-test':
+        print('eval on coco-test-dev ...')
+        num_classes = 80
+        data_dir = os.path.join(args.root, 'COCO')
+    else:
+        print('unknown dataset! We only support voc, coco-val, coco-test.')
+        exit(1)
+
+    # config
+    model_cfg = build_model_config(args)
+    trans_cfg = build_trans_config(model_cfg['trans_type'])
+
+    # build model
+    model = build_model(args, model_cfg, device, num_classes, False)
+
+    # load trained weight
+    model = load_weight(model=model, path_to_ckpt=args.weight)
+    model.to(device).eval()
+
+    # compute FLOPs and Params
+    model_copy = deepcopy(model)
+    model_copy.trainable = False
+    model_copy.eval()
+    FLOPs_and_Params(
+        model=model_copy,
+        img_size=args.img_size, 
+        device=device)
+    del model_copy
+
+    # transform
+    transform = build_transform(args.img_size, trans_cfg, is_train=False)
+
+    # evaluation
+    with torch.no_grad():
+        if args.dataset == 'voc':
+            voc_test(model, data_dir, device, transform)
+        elif args.dataset == 'coco-val':
+            coco_test(model, data_dir, device, transform, test=False)
+        elif args.dataset == 'coco-test':
+            coco_test(model, data_dir, device, transform, test=True)
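
A minimal invocation of the new eval.py, assuming a trained checkpoint is available (the weight and root paths below are placeholders):

    python eval.py --cuda -d coco-val -m yolo_anchor \
        --weight path/to/checkpoint.pth -size 416 --root /path/to/datasets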

BIN
evaluator/__pycache__/voc_evaluator.cpython-36.pyc


+ 1 - 0
evaluator/coco_evaluator.py

@@ -71,6 +71,7 @@ class COCOAPIEvaluator():
 
             # preprocess
             x, _, deltas = self.transform(img)
+            x = x.unsqueeze(0).to(self.device)
             
             id_ = int(id_)
             ids.append(id_)
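
The added unsqueeze line gives the single preprocessed image a batch dimension and moves it onto the evaluation device before the forward pass (the identical fix is applied to the VOC evaluator below). A standalone illustration of the idiom:

    import torch

    x = torch.rand(3, 416, 416)    # one CHW image from the transform
    x = x.unsqueeze(0).to('cpu')   # -> [1, 3, 416, 416], batched for the model
    print(x.shape)                 # torch.Size([1, 3, 416, 416])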

+ 1 - 0
evaluator/voc_evaluator.py

@@ -67,6 +67,7 @@ class VOCAPIEvaluator():
 
             # preprocess
             x, _, deltas = self.transform(img)
+            x = x.unsqueeze(0).to(self.device)
 
             # forward
             t0 = time.time()

+ 1 - 1
test.py

@@ -24,7 +24,7 @@ def parse_args():
     parser = argparse.ArgumentParser(description='YOLO-Tutorial')
 
     # basic
-    parser.add_argument('-size', '--img_size', default=640, type=int,
+    parser.add_argument('-size', '--img_size', default=416, type=int,
                         help='the max size of input image')
     parser.add_argument('--show', action='store_true', default=False,
                        help='show the visualization results.')