# customed.py
  1. import os
  2. import cv2
  3. import time
  4. import random
  5. import numpy as np
  6. from torch.utils.data import Dataset
  7. try:
  8. from pycocotools.coco import COCO
  9. except:
  10. print("It seems that the COCOAPI is not installed.")
  11. try:
  12. from .data_augment.strong_augment import MosaicAugment, MixupAugment
  13. except:
  14. from data_augment.strong_augment import MosaicAugment, MixupAugment
class CustomedDataset(Dataset):
    """Object-detection dataset in COCO-json format.

    Expects the following layout under ``data_dir``:
        <data_dir>/<image_set>/annotations/<image_set>.json
        <data_dir>/<image_set>/images/<file_name>

    Supports optional Mosaic / Mixup strong augmentation during training and
    optional in-memory caching of pre-resized images.
    """
    def __init__(self,
                 img_size :int = 640,
                 data_dir :str = None,
                 image_set :str = 'train',
                 transform = None,
                 trans_config = None,
                 is_train :bool =False,
                 load_cache :bool = False,
                 ):
        # ----------- Basic parameters -----------
        self.img_size = img_size
        self.image_set = image_set
        self.is_train = is_train
        # ----------- Path parameters -----------
        self.data_dir = data_dir
        # Annotation file is named after the split, e.g. 'train.json'.
        self.json_file = '{}.json'.format(image_set)
        # ----------- Data parameters -----------
        self.coco = COCO(os.path.join(self.data_dir, image_set, 'annotations', self.json_file))
        self.ids = self.coco.getImgIds()
        # Sorted category ids; class label = index into this list.
        self.class_ids = sorted(self.coco.getCatIds())
        self.dataset_size = len(self.ids)
        # ----------- Transform parameters -----------
        self.trans_config = trans_config
        self.transform = transform
        # ----------- Strong augmentation -----------
        # Mosaic/Mixup are only built for training; at eval time their
        # probabilities are forced to 0 so pull_item never triggers them.
        if is_train:
            self.mosaic_prob = trans_config['mosaic_prob'] if trans_config else 0.0
            self.mixup_prob = trans_config['mixup_prob'] if trans_config else 0.0
            self.mosaic_augment = MosaicAugment(img_size, trans_config, is_train)
            self.mixup_augment = MixupAugment(img_size, trans_config)
        else:
            self.mosaic_prob = 0.0
            self.mixup_prob = 0.0
            self.mosaic_augment = None
            self.mixup_augment = None
        print('==============================')
        print('Image Set: {}'.format(image_set))
        print('Json file: {}'.format(self.json_file))
        print('use Mosaic Augmentation: {}'.format(self.mosaic_prob))
        print('use Mixup Augmentation: {}'.format(self.mixup_prob))
        print('==============================')
        # ----------- Cached data -----------
        # When enabled, all images are loaded and pre-resized up front.
        self.load_cache = load_cache
        self.cached_datas = None
        if self.load_cache:
            self.cached_datas = self._load_cache()

    # ------------ Basic dataset function ------------
    def __len__(self):
        return len(self.ids)

    def __getitem__(self, index):
        return self.pull_item(index)

    def _load_cache(self):
        """Load every image + target into memory, pre-resized so that the
        longer image side equals ``img_size`` (aspect ratio preserved).

        Returns a list of dicts with keys "image" and "target".
        """
        data_items = []
        for idx in range(self.dataset_size):
            if idx % 2000 == 0:
                print("Caching images and targets : {} / {} ...".format(idx, self.dataset_size))
            # load a data
            image, target = self.load_image_target(idx)
            orig_h, orig_w, _ = image.shape
            # resize image (scale longer side to img_size)
            r = self.img_size / max(orig_h, orig_w)
            if r != 1:
                interp = cv2.INTER_LINEAR
                new_size = (int(orig_w * r), int(orig_h * r))
                image = cv2.resize(image, new_size, interpolation=interp)
            img_h, img_w = image.shape[:2]
            # rescale bbox coordinates to the resized image
            boxes = target["boxes"].copy()
            boxes[:, [0, 2]] = boxes[:, [0, 2]] / orig_w * img_w
            boxes[:, [1, 3]] = boxes[:, [1, 3]] / orig_h * img_h
            target["boxes"] = boxes
            dict_item = {}
            dict_item["image"] = image
            dict_item["target"] = target
            data_items.append(dict_item)
        return data_items

    # ------------ Mosaic & Mixup ------------
    def load_mosaic(self, index):
        """Build a 4-image mosaic: the given index plus 3 random others."""
        # ------------ Prepare 4 indexes of images ------------
        ## Load 4x mosaic image
        # Candidate pool excludes `index` itself so the 3 extras are distinct.
        index_list = np.arange(index).tolist() + np.arange(index+1, len(self.ids)).tolist()
        id1 = index
        id2, id3, id4 = random.sample(index_list, 3)
        indexs = [id1, id2, id3, id4]
        ## Load images and targets
        image_list = []
        target_list = []
        for index in indexs:
            img_i, target_i = self.load_image_target(index)
            image_list.append(img_i)
            target_list.append(target_i)
        # ------------ Mosaic augmentation ------------
        image, target = self.mosaic_augment(image_list, target_list)
        return image, target

    def load_mixup(self, origin_image, origin_target):
        """Mix the given sample with a freshly loaded one.

        'yolov5'-style mixup blends with another mosaic sample; 'yolox'-style
        blends with a plain image.
        NOTE(review): if mixup_type is neither value, new_image/new_target are
        unbound and the call below raises — assumed unreachable by config.
        """
        # ------------ Load a new image & target ------------
        if self.mixup_augment.mixup_type == 'yolov5':
            new_index = np.random.randint(0, len(self.ids))
            new_image, new_target = self.load_mosaic(new_index)
        elif self.mixup_augment.mixup_type == 'yolox':
            new_index = np.random.randint(0, len(self.ids))
            new_image, new_target = self.load_image_target(new_index)
        # ------------ Mixup augmentation ------------
        image, target = self.mixup_augment(origin_image, origin_target, new_image, new_target)
        return image, target

    # ------------ Load data function ------------
    def load_image_target(self, index):
        """Return (image, target) either from the in-memory cache or disk.

        target is a dict with "boxes" (N,4 xyxy), "labels" (N,) and
        "orig_size" [h, w].
        """
        # == Load a data from the cached data ==
        if self.cached_datas is not None:
            # load a data
            data_item = self.cached_datas[index]
            image = data_item["image"]
            target = data_item["target"]
        # == Load a data from the local disk ==
        else:
            # load an image
            image, _ = self.pull_image(index)
            height, width, channels = image.shape
            # load a target
            bboxes, labels = self.pull_anno(index)
            target = {
                "boxes": bboxes,
                "labels": labels,
                "orig_size": [height, width]
            }
        return image, target

    def pull_item(self, index):
        """Full pipeline for one sample: (mosaic | plain) -> mixup -> transform.

        Returns (image, target, deltas) as produced by ``self.transform``.
        """
        if random.random() < self.mosaic_prob:
            # load a mosaic image
            mosaic = True
            image, target = self.load_mosaic(index)
        else:
            mosaic = False
            # load an image and target
            image, target = self.load_image_target(index)
        # MixUp
        if random.random() < self.mixup_prob:
            image, target = self.load_mixup(image, target)
        # augment
        image, target, deltas = self.transform(image, target, mosaic)
        return image, target, deltas

    def pull_image(self, index):
        """Read the raw BGR image for dataset position ``index`` from disk.

        Returns (image, coco_image_id). NOTE(review): cv2.imread returns None
        for a missing/unreadable file — not checked here.
        """
        id_ = self.ids[index]
        im_ann = self.coco.loadImgs(id_)[0]
        img_file = os.path.join(
            self.data_dir, self.image_set, 'images', im_ann["file_name"])
        image = cv2.imread(img_file)
        return image, id_

    def pull_anno(self, index):
        """Load annotations for dataset position ``index``.

        Returns (bboxes, labels): bboxes is an (N, 4) float array in xyxy
        pixel coordinates clipped to the image, labels an (N,) int array of
        indices into ``self.class_ids``. Crowd and zero-area boxes are skipped.
        """
        img_id = self.ids[index]
        im_ann = self.coco.loadImgs(img_id)[0]
        anno_ids = self.coco.getAnnIds(imgIds=[int(img_id)], iscrowd=0)
        annotations = self.coco.loadAnns(anno_ids)
        # image infor
        width = im_ann['width']
        height = im_ann['height']
        #load a target
        bboxes = []
        labels = []
        for anno in annotations:
            if 'bbox' in anno and anno['area'] > 0:
                # bbox: convert COCO xywh to clipped xyxy
                x1 = np.max((0, anno['bbox'][0]))
                y1 = np.max((0, anno['bbox'][1]))
                x2 = np.min((width - 1, x1 + np.max((0, anno['bbox'][2] - 1))))
                y2 = np.min((height - 1, y1 + np.max((0, anno['bbox'][3] - 1))))
                # discard degenerate boxes after clipping
                if x2 <= x1 or y2 <= y1:
                    continue
                # class label
                cls_id = self.class_ids.index(anno['category_id'])
                bboxes.append([x1, y1, x2, y2])
                labels.append(cls_id)
        # guard against no boxes via resizing
        bboxes = np.array(bboxes).reshape(-1, 4)
        labels = np.array(labels).reshape(-1)
        return bboxes, labels
  192. if __name__ == "__main__":
  193. import time
  194. import argparse
  195. from build import build_transform
  196. import sys
  197. sys.path.append("..")
  198. from config.data_config.dataset_config import dataset_cfg
  199. data_config = dataset_cfg["customed"]
  200. categories = data_config["class_names"]
  201. parser = argparse.ArgumentParser(description='RT-ODLab')
  202. # opt
  203. parser.add_argument('--root', default='/Users/liuhaoran/Desktop/python_work/object-detection/dataset/AnimalDataset/',
  204. help='data root')
  205. parser.add_argument('--split', default='train',
  206. help='data split')
  207. parser.add_argument('-size', '--img_size', default=640, type=int,
  208. help='input image size')
  209. parser.add_argument('--min_box_size', default=8.0, type=float,
  210. help='min size of target bounding box.')
  211. parser.add_argument('--mosaic', default=None, type=float,
  212. help='mosaic augmentation.')
  213. parser.add_argument('--mixup', default=None, type=float,
  214. help='mixup augmentation.')
  215. parser.add_argument('--is_train', action="store_true", default=False,
  216. help='mixup augmentation.')
  217. parser.add_argument('--load_cache', action="store_true", default=False,
  218. help='load cached data.')
  219. args = parser.parse_args()
  220. trans_config = {
  221. 'aug_type': args.aug_type, # optional: ssd, yolov5
  222. 'pixel_mean': [0., 0., 0.],
  223. 'pixel_std': [255., 255., 255.],
  224. # Basic Augment
  225. 'degrees': 0.0,
  226. 'translate': 0.2,
  227. 'scale': [0.1, 2.0],
  228. 'shear': 0.0,
  229. 'perspective': 0.0,
  230. 'hsv_h': 0.015,
  231. 'hsv_s': 0.7,
  232. 'hsv_v': 0.4,
  233. 'use_ablu': True,
  234. # Mosaic & Mixup
  235. 'mosaic_prob': args.mosaic,
  236. 'mixup_prob': args.mixup,
  237. 'mosaic_type': 'yolov5_mosaic',
  238. 'mixup_type': args.mixup_type, # optional: yolov5_mixup, yolox_mixup
  239. 'mosaic_keep_ratio': False,
  240. 'mixup_scale': [0.5, 1.5]
  241. }
  242. transform, trans_cfg = build_transform(args, trans_config, 32, args.is_train)
  243. pixel_mean = transform.pixel_mean
  244. pixel_std = transform.pixel_std
  245. color_format = transform.color_format
  246. dataset = CustomedDataset(
  247. img_size=args.img_size,
  248. data_dir=args.root,
  249. image_set=args.split,
  250. transform=transform,
  251. trans_config=trans_config,
  252. is_train=args.is_train,
  253. load_cache=args.load_cache
  254. )
  255. np.random.seed(0)
  256. class_colors = [(np.random.randint(255),
  257. np.random.randint(255),
  258. np.random.randint(255)) for _ in range(80)]
  259. print('Data length: ', len(dataset))
  260. for i in range(1000):
  261. t0 = time.time()
  262. image, target, deltas = dataset.pull_item(i)
  263. print("Load data: {} s".format(time.time() - t0))
  264. # to numpy
  265. image = image.permute(1, 2, 0).numpy()
  266. # denormalize
  267. image = image * pixel_std + pixel_mean
  268. if color_format == 'rgb':
  269. # RGB to BGR
  270. image = image[..., (2, 1, 0)]
  271. # to uint8
  272. image = image.astype(np.uint8)
  273. image = image.copy()
  274. img_h, img_w = image.shape[:2]
  275. boxes = target["boxes"]
  276. labels = target["labels"]
  277. for box, label in zip(boxes, labels):
  278. x1, y1, x2, y2 = box
  279. cls_id = int(label)
  280. color = class_colors[cls_id]
  281. # class name
  282. label = categories[cls_id]
  283. if x2 - x1 > 0. and y2 - y1 > 0.:
  284. # draw bbox
  285. image = cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
  286. # put the test on the bbox
  287. cv2.putText(image, label, (int(x1), int(y1 - 5)), 0, 0.5, color, 1, lineType=cv2.LINE_AA)
  288. cv2.imshow('gt', image)
  289. # cv2.imwrite(str(i)+'.jpg', img)
  290. cv2.waitKey(0)