import os
import cv2
import random
import numpy as np
from torch.utils.data import Dataset

try:
    from pycocotools.coco import COCO
except ImportError:
    print("It seems that the COCOAPI is not installed.")

try:
    from .data_augment.strong_augment import MosaicAugment, MixupAugment
except ImportError:
    from data_augment.strong_augment import MosaicAugment, MixupAugment


crowd_class_labels = ('person',)
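
# Expected on-disk layout, inferred from the paths used in this file
# (not verified against any particular CrowdHuman release):
#
#   <data_dir>/
#       annotations/
#           train.json              # COCO-format annotation file per image_set
#           val.json
#       CrowdHuman_train/
#           Images/                 # the images referenced by "file_name"
#       CrowdHuman_val/
#           Images/
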
class CrowdHumanDataset(Dataset):
    def __init__(self,
                 img_size :int = 640,
                 data_dir :str = None,
                 image_set :str = 'train',
                 trans_config = None,
                 transform = None,
                 is_train :bool = False
                 ):
        # ----------- Basic parameters -----------
        self.img_size = img_size
        self.image_set = image_set
        self.is_train = is_train
        # ----------- Path parameters -----------
        self.data_dir = data_dir
        self.json_file = '{}.json'.format(image_set)
        # ----------- Data parameters -----------
        self.coco = COCO(os.path.join(self.data_dir, 'annotations', self.json_file))
        self.ids = self.coco.getImgIds()
        self.class_ids = sorted(self.coco.getCatIds())
        # ----------- Transform parameters -----------
        self.trans_config = trans_config
        self.transform = transform
        # ----------- Strong augmentation -----------
        if is_train:
            self.mosaic_prob = trans_config['mosaic_prob'] if trans_config else 0.0
            self.mixup_prob = trans_config['mixup_prob'] if trans_config else 0.0
            self.mosaic_augment = MosaicAugment(img_size, trans_config, is_train) if self.mosaic_prob > 0. else None
            self.mixup_augment = MixupAugment(img_size, trans_config) if self.mixup_prob > 0. else None
        else:
            self.mosaic_prob = 0.0
            self.mixup_prob = 0.0
            self.mosaic_augment = None
            self.mixup_augment = None
        print('==============================')
        print('use Mosaic Augmentation: {}'.format(self.mosaic_prob))
        print('use Mixup Augmentation: {}'.format(self.mixup_prob))
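
    # NOTE: in training mode, `trans_config` is expected to provide at least the
    # 'mosaic_prob' and 'mixup_prob' keys read above, plus whatever options
    # MosaicAugment / MixupAugment consume; see the example config in the
    # __main__ block at the bottom of this file.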
    # ------------ Basic dataset function ------------
    def __len__(self):
        return len(self.ids)

    def __getitem__(self, index):
        return self.pull_item(index)

    # ------------ Mosaic & Mixup ------------
    def load_mosaic(self, index):
        # ------------ Prepare 4 indexes of images ------------
        ## Load 4x mosaic image
        index_list = np.arange(index).tolist() + np.arange(index+1, len(self.ids)).tolist()
        id1 = index
        id2, id3, id4 = random.sample(index_list, 3)
        indexs = [id1, id2, id3, id4]

        ## Load images and targets
        image_list = []
        target_list = []
        for index in indexs:
            img_i, target_i = self.load_image_target(index)
            image_list.append(img_i)
            target_list.append(target_i)

        # ------------ Mosaic augmentation ------------
        image, target = self.mosaic_augment(image_list, target_list)

        return image, target

    def load_mixup(self, origin_image, origin_target):
        # ------------ Load a new image & target ------------
        if self.mixup_augment.mixup_type == 'yolov5':
            new_index = np.random.randint(0, len(self.ids))
            new_image, new_target = self.load_mosaic(new_index)
        elif self.mixup_augment.mixup_type == 'yolox':
            new_index = np.random.randint(0, len(self.ids))
            new_image, new_target = self.load_image_target(new_index)

        # ------------ Mixup augmentation ------------
        image, target = self.mixup_augment(origin_image, origin_target, new_image, new_target)

        return image, target
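
    # NOTE on the sampling pipeline: the helpers above and pull_item() below
    # combine as follows. With probability `mosaic_prob` a 4-image mosaic is
    # built, otherwise a single image is loaded; then, with probability
    # `mixup_prob`, the result is mixed with a freshly drawn sample (another
    # mosaic for the 'yolov5' mixup type, a single image for 'yolox'); finally
    # `self.transform` is applied.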
    # ------------ Load data function ------------
    def load_image_target(self, index):
        # load an image
        image, _ = self.pull_image(index)
        height, width, channels = image.shape

        # load a target
        bboxes, labels = self.pull_anno(index)
        target = {
            "boxes": bboxes,
            "labels": labels,
            "orig_size": [height, width]
        }

        return image, target

    def pull_item(self, index):
        if random.random() < self.mosaic_prob:
            # load a mosaic image
            mosaic = True
            image, target = self.load_mosaic(index)
        else:
            mosaic = False
            # load an image and target
            image, target = self.load_image_target(index)

        # MixUp
        if random.random() < self.mixup_prob:
            image, target = self.load_mixup(image, target)

        # augment
        image, target, deltas = self.transform(image, target, mosaic)

        return image, target, deltas
    def pull_image(self, index):
        id_ = self.ids[index]
        im_ann = self.coco.loadImgs(id_)[0]
        img_id = im_ann["file_name"][:-4]
        img_file = os.path.join(
            self.data_dir, 'CrowdHuman_{}'.format(self.image_set), 'Images', im_ann["file_name"])
        image = cv2.imread(img_file)

        return image, img_id

    def pull_anno(self, index):
        img_id = self.ids[index]
        im_ann = self.coco.loadImgs(img_id)[0]
        anno_ids = self.coco.getAnnIds(imgIds=[int(img_id)], iscrowd=0)
        annotations = self.coco.loadAnns(anno_ids)

        # image info
        width = im_ann['width']
        height = im_ann['height']

        # load a target
        bboxes = []
        labels = []
        for anno in annotations:
            if 'bbox' in anno and anno['area'] > 0:
                # clip the box to the image and convert [x, y, w, h] -> [x1, y1, x2, y2]
                x1 = np.max((0, anno['bbox'][0]))
                y1 = np.max((0, anno['bbox'][1]))
                x2 = np.min((width - 1, x1 + np.max((0, anno['bbox'][2] - 1))))
                y2 = np.min((height - 1, y1 + np.max((0, anno['bbox'][3] - 1))))
                if x2 <= x1 or y2 <= y1:
                    continue
                # class label
                cls_id = self.class_ids.index(anno['category_id'])

                bboxes.append([x1, y1, x2, y2])
                labels.append(cls_id)

        # guard against images with no valid boxes: reshape to (0, 4) / (0,)
        bboxes = np.array(bboxes).reshape(-1, 4)
        labels = np.array(labels).reshape(-1)

        return bboxes, labels
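

# A minimal usage sketch (illustrative only), assuming `transform` and
# `trans_config` are built as in the __main__ block below; `CollateFunc` is a
# hypothetical collate function, not defined in this file, which would be
# needed because each sample carries a different number of boxes:
#
#   from torch.utils.data import DataLoader
#
#   dataset = CrowdHumanDataset(img_size=640,
#                               data_dir='path/to/CrowdHuman',
#                               image_set='train',
#                               trans_config=trans_config,
#                               transform=transform,
#                               is_train=True)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True,
#                       collate_fn=CollateFunc(), num_workers=4)
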
if __name__ == "__main__":
    import time
    import argparse
    from build import build_transform

    parser = argparse.ArgumentParser(description='CrowdHuman-Dataset')
    # opt
    parser.add_argument('--root', default='/Users/liuhaoran/Desktop/python_work/object-detection/dataset/CrowdHuman/',
                        help='data root')
    parser.add_argument('-size', '--img_size', default=640, type=int,
                        help='input image size.')
    parser.add_argument('--aug_type', type=str, default='ssd',
                        help='augmentation type')
    parser.add_argument('--mosaic', default=0., type=float,
                        help='mosaic augmentation.')
    parser.add_argument('--mixup', default=0., type=float,
                        help='mixup augmentation.')
    parser.add_argument('--is_train', action="store_true", default=False,
                        help='train mode.')
    args = parser.parse_args()

    trans_config = {
        'aug_type': args.aug_type,    # optional: ssd, yolov5
        'pixel_mean': [0., 0., 0.],
        'pixel_std': [255., 255., 255.],
        # Basic Augment
        'degrees': 0.0,
        'translate': 0.2,
        'scale': [0.1, 2.0],
        'shear': 0.0,
        'perspective': 0.0,
        'hsv_h': 0.015,
        'hsv_s': 0.7,
        'hsv_v': 0.4,
        'use_ablu': True,
        # Mosaic & Mixup
        'mosaic_prob': args.mosaic,
        'mixup_prob': args.mixup,
        'mosaic_type': 'yolov5',
        'mixup_type': 'yolov5',       # optional: yolov5, yolox
        'mosaic_keep_ratio': False,
        'mixup_scale': [0.5, 1.5]
    }

    transform, trans_cfg = build_transform(args, trans_config, 32, args.is_train)
    pixel_mean = transform.pixel_mean
    pixel_std = transform.pixel_std
    color_format = transform.color_format

    dataset = CrowdHumanDataset(
        img_size=args.img_size,
        data_dir=args.root,
        image_set='val',
        transform=transform,
        trans_config=trans_config,
        )

    np.random.seed(0)
    class_colors = [(np.random.randint(255),
                     np.random.randint(255),
                     np.random.randint(255)) for _ in range(80)]
    print('Data length: ', len(dataset))

    for i in range(1000):
        t0 = time.time()
        image, target, deltas = dataset.pull_item(i)
        print("Load data: {} s".format(time.time() - t0))

        # to numpy
        image = image.permute(1, 2, 0).numpy()
        # denormalize
        image = image * pixel_std + pixel_mean
        if color_format == 'rgb':
            # RGB to BGR
            image = image[..., (2, 1, 0)]
        # to uint8
        image = image.astype(np.uint8)
        image = image.copy()
        img_h, img_w = image.shape[:2]

        boxes = target["boxes"]
        labels = target["labels"]
        for box, label in zip(boxes, labels):
            x1, y1, x2, y2 = box
            cls_id = int(label)
            color = class_colors[cls_id]
            # class name
            label = crowd_class_labels[cls_id]
            image = cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)
            # put the text on the bbox
            cv2.putText(image, label, (int(x1), int(y1 - 5)), 0, 0.5, color, 1, lineType=cv2.LINE_AA)
        cv2.imshow('gt', image)
        # cv2.imwrite(str(i)+'.jpg', img)
        cv2.waitKey(0)