garbage.py 2.8 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586
  1. from __future__ import division
  2. import os
  3. import random
  4. import numpy as np
  5. import argparse
  6. import time
  7. # ----------------- Torch Components -----------------
  8. import torch
  9. # ----------------- Extra Components -----------------
  10. from utils import distributed_utils
  11. def parse_args():
  12. parser = argparse.ArgumentParser(description='Real-time Object Detection LAB')
  13. # Random seed
  14. parser.add_argument('--seed', default=42, type=int)
  15. # GPU
  16. parser.add_argument('--cuda', action='store_true', default=False,
  17. help='use cuda.')
  18. # DDP train
  19. parser.add_argument('-dist', '--distributed', action='store_true', default=False,
  20. help='distributed training')
  21. parser.add_argument('--dist_url', default='env://',
  22. help='url used to set up distributed training')
  23. parser.add_argument('--world_size', default=1, type=int,
  24. help='number of distributed processes')
  25. parser.add_argument('--sybn', action='store_true', default=False,
  26. help='use sybn.')
  27. parser.add_argument('--find_unused_parameters', action='store_true', default=False,
  28. help='set find_unused_parameters as True.')
  29. return parser.parse_args()
  30. def train():
  31. args = parse_args()
  32. print("Setting Arguments.. : ", args)
  33. print("----------------------------------------------------------")
  34. # ---------------------------- Build DDP ----------------------------
  35. local_rank = local_process_rank = -1
  36. if args.distributed:
  37. distributed_utils.init_distributed_mode(args)
  38. print("git:\n {}\n".format(distributed_utils.get_sha()))
  39. try:
  40. # Multiple Mechine & Multiple GPUs (world size > 8)
  41. local_rank = torch.distributed.get_rank()
  42. local_process_rank = int(os.getenv('LOCAL_PROCESS_RANK', '0'))
  43. except:
  44. # Single Mechine & Multiple GPUs (world size <= 8)
  45. local_rank = local_process_rank = torch.distributed.get_rank()
  46. world_size = distributed_utils.get_world_size()
  47. print("LOCAL RANK: ", local_rank)
  48. print("LOCAL_PROCESS_RANL: ", local_process_rank)
  49. print('WORLD SIZE: {}'.format(world_size))
  50. # ---------------------------- Build CUDA ----------------------------
  51. if args.cuda and torch.cuda.is_available():
  52. print('use cuda')
  53. device = torch.device("cuda")
  54. else:
  55. device = torch.device("cpu")
  56. print("Create a garbage ...")
  57. garbage = torch.randn(900, 1024, 80, 80).to(device) # 15 G
  58. # 10 days
  59. for i in range(10):
  60. print("Day-{} run garbage ...".format(i))
  61. decay = 60.0 * 60.0 * 24.0 # 1 day
  62. time.sleep(decay)
  63. del garbage
  64. if args.cuda:
  65. torch.cuda.empty_cache()
  66. if __name__ == '__main__':
  67. train()