# eval.py — offline evaluation entry point (VOC / COCO / custom dataset).
import argparse
import os
from copy import deepcopy

import torch

from config import build_dataset_config, build_model_config, build_trans_config
# load dataset transform
from dataset.build import build_transform
from evaluator.coco_evaluator import COCOAPIEvaluator
from evaluator.ourdataset_evaluator import OurDatasetEvaluator
from evaluator.voc_evaluator import VOCAPIEvaluator
from models.detectors import build_model
# load some utils
from utils.misc import compute_flops, load_weight
  15. def parse_args():
  16. parser = argparse.ArgumentParser(description='YOLO-Tutorial')
  17. # basic
  18. parser.add_argument('-size', '--img_size', default=640, type=int,
  19. help='the max size of input image')
  20. parser.add_argument('--cuda', action='store_true', default=False,
  21. help='Use cuda')
  22. # model
  23. parser.add_argument('-m', '--model', default='yolov1', type=str,
  24. help='build yolo')
  25. parser.add_argument('--weight', default=None,
  26. type=str, help='Trained state_dict file path to open')
  27. parser.add_argument('-ct', '--conf_thresh', default=0.001, type=float,
  28. help='confidence threshold')
  29. parser.add_argument('-nt', '--nms_thresh', default=0.7, type=float,
  30. help='NMS threshold')
  31. parser.add_argument('--topk', default=1000, type=int,
  32. help='topk candidates dets of each level before NMS')
  33. parser.add_argument("--no_decode", action="store_true", default=False,
  34. help="not decode in inference or yes")
  35. parser.add_argument('--fuse_conv_bn', action='store_true', default=False,
  36. help='fuse Conv & BN')
  37. parser.add_argument('--no_multi_labels', action='store_true', default=False,
  38. help='Perform post-process with multi-labels trick.')
  39. parser.add_argument('--nms_class_agnostic', action='store_true', default=False,
  40. help='Perform NMS operations regardless of category.')
  41. # dataset
  42. parser.add_argument('--root', default='/mnt/share/ssd2/dataset',
  43. help='data root')
  44. parser.add_argument('-d', '--dataset', default='coco',
  45. help='coco, voc.')
  46. parser.add_argument('--mosaic', default=None, type=float,
  47. help='mosaic augmentation.')
  48. parser.add_argument('--mixup', default=None, type=float,
  49. help='mixup augmentation.')
  50. parser.add_argument('--load_cache', action='store_true', default=False,
  51. help='load data into memory.')
  52. # TTA
  53. parser.add_argument('-tta', '--test_aug', action='store_true', default=False,
  54. help='use test augmentation.')
  55. return parser.parse_args()
  56. def voc_test(model, data_dir, device, transform):
  57. evaluator = VOCAPIEvaluator(data_dir=data_dir,
  58. device=device,
  59. transform=transform,
  60. display=True)
  61. # VOC evaluation
  62. evaluator.evaluate(model)
  63. def coco_test(model, data_dir, device, transform, test=False):
  64. if test:
  65. # test-dev
  66. print('test on test-dev 2017')
  67. evaluator = COCOAPIEvaluator(
  68. data_dir=data_dir,
  69. device=device,
  70. testset=True,
  71. transform=transform)
  72. else:
  73. # eval
  74. evaluator = COCOAPIEvaluator(
  75. data_dir=data_dir,
  76. device=device,
  77. testset=False,
  78. transform=transform)
  79. # COCO evaluation
  80. evaluator.evaluate(model)
  81. def our_test(model, data_dir, device, transform):
  82. evaluator = OurDatasetEvaluator(
  83. data_dir=data_dir,
  84. device=device,
  85. image_set='val',
  86. transform=transform)
  87. # WiderFace evaluation
  88. evaluator.evaluate(model)
  89. if __name__ == '__main__':
  90. args = parse_args()
  91. # cuda
  92. if args.cuda:
  93. print('use cuda')
  94. device = torch.device("cuda")
  95. else:
  96. device = torch.device("cpu")
  97. # Dataset & Model Config
  98. data_cfg = build_dataset_config(args)
  99. model_cfg = build_model_config(args)
  100. trans_cfg = build_trans_config(model_cfg['trans_type'])
  101. data_dir = os.path.join(args.root, data_cfg['data_name'])
  102. num_classes = data_cfg['num_classes']
  103. # build model
  104. model = build_model(args, model_cfg, device, num_classes, False)
  105. # load trained weight
  106. model = load_weight(model, args.weight, args.fuse_conv_bn)
  107. model.to(device).eval()
  108. # compute FLOPs and Params
  109. model_copy = deepcopy(model)
  110. model_copy.trainable = False
  111. model_copy.eval()
  112. compute_flops(
  113. model=model_copy,
  114. img_size=args.img_size,
  115. device=device)
  116. del model_copy
  117. # transform
  118. val_transform, trans_cfg = build_transform(args, trans_cfg, model_cfg['max_stride'], is_train=False)
  119. # evaluation
  120. with torch.no_grad():
  121. if args.dataset == 'voc':
  122. voc_test(model, data_dir, device, val_transform)
  123. elif args.dataset == 'coco-val' or args.dataset == 'coco':
  124. coco_test(model, data_dir, device, val_transform, test=False)
  125. elif args.dataset == 'coco-test':
  126. coco_test(model, data_dir, device, val_transform, test=True)
  127. elif args.dataset == 'ourdataset':
  128. our_test(model, data_dir, device, val_transform)