# vis_tools.py
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt


# -------------------------- For Detection Task --------------------------
## Draw bbox & label on the image
def plot_bbox_labels(img, bbox, label=None, cls_color=None, text_scale=0.4):
    x1, y1, x2, y2 = bbox
    x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
    # plot bbox
    cv2.rectangle(img, (x1, y1), (x2, y2), cls_color, 2)
    if label is not None:
        # measure the label text so the title bbox matches its width
        t_size = cv2.getTextSize(label, 0, fontScale=1, thickness=2)[0]
        # plot title bbox
        cv2.rectangle(img, (x1, y1 - t_size[1]), (int(x1 + t_size[0] * text_scale), y1), cls_color, -1)
        # put the text on the title bbox
        cv2.putText(img, label, (int(x1), int(y1 - 5)), 0, text_scale, (0, 0, 0), 1, lineType=cv2.LINE_AA)

    return img
## Visualize the detection results
def visualize(image, bboxes, scores, labels, class_colors, class_names):
    ts = 0.4
    for i, bbox in enumerate(bboxes):
        cls_id = int(labels[i])
        cls_color = class_colors[cls_id]
        mess = '%s: %.2f' % (class_names[cls_id], scores[i])
        image = plot_bbox_labels(image, bbox, mess, cls_color, text_scale=ts)

    return image
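## Example usage of `visualize` (a minimal sketch; the file name, boxes, scores and
## class list below are illustrative assumptions, not part of this module):
#
#   img = cv2.imread('demo.jpg')                      # any BGR image
#   bboxes = np.array([[50, 60, 200, 220]])           # [x1, y1, x2, y2]
#   scores = np.array([0.87])
#   labels = np.array([0])
#   class_names = ['person']
#   np.random.seed(0)
#   class_colors = [(np.random.randint(255), np.random.randint(255), np.random.randint(255))
#                   for _ in class_names]
#   out = visualize(img, bboxes, scores, labels, class_colors, class_names)
#   cv2.imwrite('demo_vis.jpg', out)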
## Visualize the input data during the training stage
def vis_data(images, targets, num_classes=80, pixel_mean=None, pixel_std=None):
    """
        images: (tensor) [B, 3, H, W]
        targets: (list) a list of targets
    """
    batch_size = images.size(0)
    np.random.seed(0)
    class_colors = [(np.random.randint(255),
                     np.random.randint(255),
                     np.random.randint(255)) for _ in range(num_classes)]

    for bi in range(batch_size):
        tgt_boxes = targets[bi]['boxes']
        tgt_labels = targets[bi]['labels']
        # to numpy
        image = images[bi].permute(1, 2, 0).cpu().numpy()
        # denormalize image
        if pixel_mean is not None and pixel_std is not None:
            image = image * pixel_std + pixel_mean
        # clip to the valid pixel range before casting to uint8
        image = np.clip(image, 0, 255).astype(np.uint8)
        image = image.copy()
        img_h, img_w = image.shape[:2]

        # visualize target
        for box, label in zip(tgt_boxes, tgt_labels):
            x1, y1, x2, y2 = box
            x1, y1 = int(x1), int(y1)
            x2, y2 = int(x2), int(y2)
            cls_id = int(label)
            # draw box
            color = class_colors[cls_id]
            cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)

        cv2.imshow('train target', image)
        cv2.waitKey(0)
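## Example usage of `vis_data` inside a training loop (a minimal sketch; the
## dataloader and the pixel statistics below are assumptions -- targets are
## expected as dicts with absolute xyxy 'boxes' and integer 'labels'):
#
#   pixel_mean = np.array([123.675, 116.28, 103.53])
#   pixel_std = np.array([58.395, 57.12, 57.375])
#   for images, targets in train_dataloader:
#       vis_data(images, targets, num_classes=80,
#                pixel_mean=pixel_mean, pixel_std=pixel_std)
#       break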
## convert feature to the heatmap
def convert_feature_heatmap(feature):
    """
        feature: (ndarray) [H, W, C]
    """
    # NOTE: one simple way to build the heatmap (an assumed implementation):
    # average over channels, then min-max normalize to [0, 1].
    heatmap = feature.mean(axis=-1)
    heatmap = (heatmap - heatmap.min()) / (heatmap.max() - heatmap.min() + 1e-8)

    return heatmap
## draw feature on the image
def draw_feature(img, features, save=None):
    """
        img: (ndarray & cv2.Mat) [H, W, C], where C is 3 for RGB or 1 for Gray.
        features: (List[ndarray]) a list of feature maps, each of shape [H, W, C].
        save: (bool) save the result or not.
    """
    img_h, img_w = img.shape[:2]
    for i, fmp in enumerate(features):
        hmp = convert_feature_heatmap(fmp)
        hmp = cv2.resize(hmp, (img_w, img_h))
        # scale to [0, 255] before casting, otherwise the uint8 cast truncates to 0/1
        hmp = (hmp * 255).astype(np.uint8)
        hmp_rgb = cv2.applyColorMap(hmp, cv2.COLORMAP_JET)
        superimposed_img = np.clip(hmp_rgb * 0.4 + img, 0, 255).astype(np.uint8)

        # show the heatmap
        plt.imshow(hmp)
        plt.show()
        plt.close()

        # show the image with heatmap
        cv2.imshow("image with heatmap", superimposed_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        if save:
            save_dir = 'feature_heatmap'
            os.makedirs(save_dir, exist_ok=True)
            cv2.imwrite(os.path.join(save_dir, 'feature_{}.png'.format(i)), superimposed_img)
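## Example usage of `draw_feature` (a minimal sketch; the image path and the random
## feature maps below are illustrative assumptions standing in for real backbone
## outputs converted to [H, W, C] numpy arrays):
#
#   img = cv2.imread('demo.jpg')
#   features = [np.random.rand(32, 32, 256), np.random.rand(16, 16, 512)]
#   draw_feature(img, features, save=True)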
# -------------------------- For Tracking Task --------------------------
def get_color(idx):
    idx = idx * 3
    color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)

    return color
def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0., ids2=None):
    im = np.ascontiguousarray(np.copy(image))
    im_h, im_w = im.shape[:2]

    top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255

    #text_scale = max(1, image.shape[1] / 1600.)
    #text_thickness = 2
    #line_thickness = max(1, int(image.shape[1] / 500.))
    text_scale = 2
    text_thickness = 2
    line_thickness = 3

    radius = max(5, int(im_w / 140.))

    cv2.putText(im, 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),
                (0, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), thickness=2)

    for i, tlwh in enumerate(tlwhs):
        x1, y1, w, h = tlwh
        intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))
        obj_id = int(obj_ids[i])
        id_text = '{}'.format(int(obj_id))
        if ids2 is not None:
            id_text = id_text + ', {}'.format(int(ids2[i]))
        color = get_color(abs(obj_id))
        cv2.rectangle(im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)
        cv2.putText(im, id_text, (intbox[0], intbox[1]), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255),
                    thickness=text_thickness)

    return im