
fix a bug

冬落 2 years ago
parent
commit
53d021ff81
8 changed files with 49 additions and 48 deletions
  1. .gitignore (+3 -1)
  2. dataset/build.py (+3 -3)
  3. dataset/coco.py (+2 -1)
  4. dataset/make_dataset.py (+30 -37)
  5. dataset/voc.py (+2 -1)
  6. evaluator/voc_evaluator.py (+3 -3)
  7. train.py (+3 -2)
  8. train.sh (+3 -0)

.gitignore (+3 -1)

@@ -11,4 +11,6 @@ det_results
 deployment/OpenVINO/cpp/build
 cluster.json
 train_nebula.py
-train_nebula.sh
+train_nebula.sh
+make_data_nebula.sh
+dataset/make_dataset_nebula.py

dataset/build.py (+3 -3)

@@ -37,7 +37,7 @@ def build_dataset(args, data_cfg, trans_config, transform, is_train=False):
             image_sets=[('2007', 'trainval'), ('2012', 'trainval')] if is_train else [('2007', 'test')],
             transform=transform,
             trans_config=trans_config,
-            load_cache=args.load_cache
+            load_cache=args.load_cache if is_train else None
             )
     ## COCO dataset
     elif args.dataset == 'coco':
@@ -47,7 +47,7 @@ def build_dataset(args, data_cfg, trans_config, transform, is_train=False):
             image_set='train2017' if is_train else 'val2017',
             transform=transform,
             trans_config=trans_config,
-            load_cache=args.load_cache
+            load_cache=args.load_cache if is_train else None
             )
     ## Custom dataset
     elif args.dataset == 'ourdataset':
@@ -57,7 +57,7 @@ def build_dataset(args, data_cfg, trans_config, transform, is_train=False):
             image_set='train' if is_train else 'val',
             transform=transform,
             trans_config=trans_config,
-            load_cache=args.load_cache
+            load_cache=args.load_cache if is_train else None
             )
 
     return dataset, dataset_info

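All three build.py hunks apply the same guard: the cache path is forwarded only when the training split is built, so evaluation always decodes images from disk. A condensed sketch of the pattern, assuming the class and argument names shown in the diff (one branch only):

```python
# Minimal sketch, not the repo's full builder: `args.load_cache` is now a
# file path (see the train.py hunk below), and None disables the cache.
from dataset.coco import COCODataset

def build_dataset(args, data_cfg, trans_config, transform, is_train=False):
    return COCODataset(
        image_set='train2017' if is_train else 'val2017',
        transform=transform,
        trans_config=trans_config,
        load_cache=args.load_cache if is_train else None,  # eval: no cache
    )
```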
dataset/coco.py (+2 -1)

@@ -106,6 +106,7 @@ class COCODataset(Dataset):
             self.dataset_size = len(self.cached_datas)
             print("Loading done !")
         except:
+            self.cached_datas = None
             self.load_cache = None
             print("{} does not exits.".format(self.load_cache))
 
@@ -151,7 +152,7 @@ class COCODataset(Dataset):
     # ------------ Load data function ------------
     def load_image_target(self, index):
         # == Load a data from the cached data ==
-        if self.load_cache and self.is_train:
+        if self.load_cache is not None and self.is_train:
             # load a data
             data_item = self.cached_datas[index]
             image = data_item["image"]

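Both dataset classes receive the same two fixes. Before, a failed cache read left `self.cached_datas` unassigned, so a later `self.cached_datas[index]` raised `AttributeError` instead of falling back to disk loading; and with `load_cache` now a path or None, the explicit `is not None` test is the clearer guard. A condensed sketch of the repaired flow, assuming the cache is a list saved via `torch.save` (the `target` field and the exception types are assumptions, and unlike the committed code the sketch logs the path before resetting it):

```python
import torch

class COCODataset:
    def _try_load_cache(self, cache_path):
        try:
            self.cached_datas = torch.load(cache_path)
            self.load_cache = cache_path
        except (FileNotFoundError, RuntimeError):
            # Reset both attributes together so load_image_target() can
            # detect the failure and fall back to decoding from disk.
            print("{} does not exist.".format(cache_path))
            self.cached_datas = None
            self.load_cache = None

    def load_image_target(self, index):
        if self.load_cache is not None and self.is_train:
            data_item = self.cached_datas[index]
            return data_item["image"], data_item["target"]
        # ... otherwise read and decode the image from disk
```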
dataset/make_dataset.py (+30 -37)

@@ -4,19 +4,24 @@ import torch
 import random
 import numpy as np
 
-import sys
-sys.path.append("../")
-from utils import distributed_utils
-from dataset.voc import VOCDataset, VOC_CLASSES
-from dataset.coco import COCODataset, coco_class_labels, coco_class_index
-from config import build_trans_config, build_dataset_config
-
-
-def fix_random_seed(args):
-    seed = args.seed + distributed_utils.get_rank()
-    torch.manual_seed(seed)
-    np.random.seed(seed)
-    random.seed(seed)
+from voc import VOCDataset, VOC_CLASSES
+from coco import COCODataset, coco_class_labels, coco_class_index
+
+dataset_cfg = {
+    'voc': {
+        'data_name': 'VOCdevkit',
+        'num_classes': 20,
+        'class_indexs': None,
+        'class_names': ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'),
+    },
+
+    'coco':{
+        'data_name': 'COCO',
+        'num_classes': 80,
+        'class_indexs': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90],
+        'class_names': ('background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'street sign', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella', 'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk', 'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'),
+    },
+}
 
 # ------------------------------ Dataset ------------------------------
 def build_dataset(args, data_cfg, trans_config, transform, is_train=False):
@@ -96,11 +101,19 @@ def visualize(image, target, dataset_name="voc"):
     cv2.imshow('gt', image)
     cv2.waitKey(0)
 
+def build_dataset_config(args):
+    if args.dataset in ['coco', 'coco-val', 'coco-test']:
+        cfg = dataset_cfg['coco']
+    else:
+        cfg = dataset_cfg[args.dataset]
+
+    print('==============================')
+    print('Dataset Config: {} \n'.format(cfg))
+
+    return cfg
 
 if __name__ == "__main__":
-    import argparse
-    from build import build_transform
-    
+    import argparse    
     parser = argparse.ArgumentParser(description='VOC-Dataset')
 
     # Seed
@@ -126,40 +139,20 @@ if __name__ == "__main__":
                         help='mosaic augmentation.')
     parser.add_argument('--mixup', default=None, type=float,
                         help='mixup augmentation.')
-    # DDP train
-    parser.add_argument('-dist', '--distributed', action='store_true', default=False,
-                        help='distributed training')
-    parser.add_argument('--dist_url', default='env://', 
-                        help='url used to set up distributed training')
-    parser.add_argument('--world_size', default=1, type=int,
-                        help='number of distributed processes')
-    parser.add_argument('--sybn', action='store_true', default=False, 
-                        help='use sybn.')
     # Output
     parser.add_argument('--output_dir', type=str, default='cache_data/',
                         help='data root')
     
     args = parser.parse_args()
 
-    
-    assert args.aug_type in ["yolov5_pico", "yolov5_nano", "yolov5_small", "yolov5_medium", "yolov5_large", "yolov5_huge",
-                             "yolox_pico",  "yolox_nano",  "yolox_small",  "yolox_medium",  "yolox_large",  "yolox_huge"]
-    
 
     # ------------- Build transform config -------------
     dataset_cfg  = build_dataset_config(args)
-    trans_config = build_trans_config(args.aug_type)
-
-    # ------------- Build transform -------------
-    transform, trans_config = build_transform(args, trans_config, max_stride=32, is_train=args.is_train)
 
     # ------------- Build dataset -------------
-    dataset, dataset_info = build_dataset(args, dataset_cfg, trans_config, transform, is_train=args.is_train)
+    dataset, dataset_info = build_dataset(args, dataset_cfg, trans_config=None, transform=None, is_train=args.is_train)
     print('Data length: ', len(dataset))
 
-    # ---------------------------- Fix random seed ----------------------------
-    fix_random_seed(args)
-
     # ---------------------------- Main process ----------------------------
     # We only cache the taining data
     data_items = []

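After this rewrite, make_dataset.py runs standalone: it imports the dataset classes from its own directory, carries an inlined VOC/COCO config table instead of the project-wide `config` module, and builds the dataset with `trans_config=None, transform=None` so cached samples are stored unaugmented. A hedged sketch of the caching loop that `data_items = []` leads into (the per-item fields and the output file name are assumptions; the name is chosen to match the `coco_train.pth` referenced in train.sh):

```python
import os
import torch

# Iterate the raw, untransformed dataset and serialize every sample.
data_items = []
for index in range(len(dataset)):
    image, target = dataset.load_image_target(index)
    data_items.append({"image": image, "target": target})

os.makedirs(args.output_dir, exist_ok=True)
torch.save(data_items, os.path.join(args.output_dir, "coco_train.pth"))
```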
dataset/voc.py (+2 -1)

@@ -122,6 +122,7 @@ class VOCDataset(data.Dataset):
             self.dataset_size = len(self.cached_datas)
             print("Loading done !")
         except:
+            self.cached_datas = None
             self.load_cache = None
             print("{} does not exits.".format(self.load_cache))
 
@@ -167,7 +168,7 @@ class VOCDataset(data.Dataset):
     # ------------ Load data function ------------
     def load_image_target(self, index):
         # == Load a data from the cached data ==
-        if self.load_cache and self.is_train:
+        if self.load_cache is not None and self.is_train:
             # load a data
             data_item = self.cached_datas[index]
             image = data_item["image"]

evaluator/voc_evaluator.py (+3 -3)

@@ -4,7 +4,7 @@
     Licensed under The MIT License [see LICENSE for details]
 """
 
-from dataset.voc import VOCDetection, VOC_CLASSES
+from dataset.voc import VOCDataset, VOC_CLASSES
 import os
 import time
 import numpy as np
@@ -43,7 +43,7 @@ class VOCAPIEvaluator():
         self.output_dir = self.get_output_dir('det_results/eval/voc_eval/', self.set_type)
 
         # dataset
-        self.dataset = VOCDetection(
+        self.dataset = VOCDataset(
             data_dir=data_dir, 
             image_sets=[('2007', set_type)],
             is_train=False)
@@ -131,7 +131,7 @@ class VOCAPIEvaluator():
         """
         """
         filedir = os.path.join(name, phase)
         filedir = os.path.join(name, phase)
         if not os.path.exists(filedir):
         if not os.path.exists(filedir):
-            os.makedirs(filedir)
+            os.makedirs(filedir, exist_ok=True)
         return filedir
         return filedir
 
 
 
 

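Two evaluator fixes: the import and the constructor move to `VOCDataset`, matching the class actually defined in dataset/voc.py (the old `VOCDetection` name no longer resolves), and the output-directory helper becomes race-safe. With plain `os.makedirs`, two processes (e.g. DDP ranks) that both pass the `exists()` check crash with `FileExistsError`; `exist_ok=True` makes creation idempotent:

```python
import os

def get_output_dir(name, phase):
    filedir = os.path.join(name, phase)
    if not os.path.exists(filedir):
        # exist_ok=True: if another process created the directory between
        # the check above and this call, makedirs() is a harmless no-op.
        os.makedirs(filedir, exist_ok=True)
    return filedir
```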
train.py (+3 -2)

@@ -89,10 +89,11 @@ def parse_args():
                         help='data root')
     parser.add_argument('-d', '--dataset', default='coco',
                         help='coco, voc, widerface, crowdhuman')
-    parser.add_argument('--load_cache', action='store_true', default=False,
-                        help='load data into memory.')
+    parser.add_argument('--load_cache', type=str, default=None,
+                        help='Path to the cached data.')
     parser.add_argument('--num_workers', default=4, type=int, 
                         help='Number of workers used in dataloading')
+    
     # Train trick
     parser.add_argument('-ms', '--multi_scale', action='store_true', default=False,
                         help='Multi scale')

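The flag flips from a boolean switch to an optional path, so `--load_cache` now both enables the cache and says where it lives; omitting it yields `None`, which the dataset classes above treat as cache-disabled. A small self-contained check of the new parsing:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--load_cache', type=str, default=None,
                    help='Path to the cached data.')

# A path is stored verbatim; omitting the flag leaves the default None.
args = parser.parse_args(['--load_cache', '/data/datasets/coco_train.pth'])
assert args.load_cache == '/data/datasets/coco_train.pth'
assert parser.parse_args([]).load_cache is None
```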
train.sh (+3 -0)

@@ -2,6 +2,7 @@
 DATASET="coco"
 DATA_ROOT="/data/datasets/"
 # DATA_ROOT="/Users/liuhaoran/Desktop/python_work/object-detection/dataset/"
+CACHED_DATA="${DATA_ROOT}/coco_train.pth"
 
 # MODEL setting
 MODEL="yolov8_l"
@@ -58,6 +59,7 @@ if [ $WORLD_SIZE == 1 ]; then
             --cuda \
             --dataset ${DATASET} \
             --root ${DATA_ROOT} \
+            --load_cache ${CACHED_DATA} \
             --model ${MODEL} \
             --batch_size ${BATCH_SIZE} \
             --img_size ${IMAGE_SIZE} \
@@ -75,6 +77,7 @@ elif [[ $WORLD_SIZE -gt 1 && $WORLD_SIZE -le 8 ]]; then
             -dist \
             --dataset ${DATASET} \
             --root ${DATA_ROOT} \
+            --load_cache ${CACHED_DATA} \
             --model ${MODEL} \
             --batch_size ${BATCH_SIZE} \
             --img_size ${IMAGE_SIZE} \