train.sh

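# Usage (assuming train.py lives in the same directory as this script):
#   bash train.sh 1    # single-GPU training
#   bash train.sh 4    # 4-GPU single-machine training via torch.distributed.run
# The first argument is the number of GPUs (WORLD_SIZE); values above 8 are rejected below.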
# Dataset setting
DATASET="coco"
DATA_ROOT="/data/datasets/"

# Model setting
MODEL="yolox_s"
IMAGE_SIZE=640
RESUME="None"

# Epoch setting per model family:
#   BATCH_SIZE   - training batch size
#   MAX_EPOCH    - total training epochs
#   WP_EPOCH     - warm-up epochs
#   EVAL_EPOCH   - evaluation interval (in epochs)
#   NO_AUG_EPOCH - final epochs trained without strong data augmentation
if [[ $MODEL == *"yolov8"* ]]; then
    BATCH_SIZE=128
    MAX_EPOCH=500
    WP_EPOCH=3
    EVAL_EPOCH=10
    NO_AUG_EPOCH=20
elif [[ $MODEL == *"yolox2"* ]]; then
    BATCH_SIZE=128
    MAX_EPOCH=300
    WP_EPOCH=3
    EVAL_EPOCH=10
    NO_AUG_EPOCH=20
elif [[ $MODEL == *"yolox"* ]]; then
    BATCH_SIZE=128
    MAX_EPOCH=300
    WP_EPOCH=3
    EVAL_EPOCH=10
    NO_AUG_EPOCH=20
elif [[ $MODEL == *"yolov7"* ]]; then
    BATCH_SIZE=128
    MAX_EPOCH=300
    WP_EPOCH=3
    EVAL_EPOCH=10
    NO_AUG_EPOCH=20
elif [[ $MODEL == *"yolov5"* ]]; then
    BATCH_SIZE=128
    MAX_EPOCH=300
    WP_EPOCH=3
    EVAL_EPOCH=10
    NO_AUG_EPOCH=20
elif [[ $MODEL == *"yolov4"* ]]; then
    BATCH_SIZE=128
    MAX_EPOCH=300
    WP_EPOCH=3
    EVAL_EPOCH=10
    NO_AUG_EPOCH=20
elif [[ $MODEL == *"yolov3"* ]]; then
    BATCH_SIZE=128
    MAX_EPOCH=300
    WP_EPOCH=3
    EVAL_EPOCH=10
    NO_AUG_EPOCH=20
else
    BATCH_SIZE=128
    MAX_EPOCH=150
    WP_EPOCH=3
    EVAL_EPOCH=10
    NO_AUG_EPOCH=0
fi
# -------------------------- Train Pipeline --------------------------
# Number of GPUs, passed as the first argument (defaults to 1 if omitted).
WORLD_SIZE=${1:-1}

if [ "$WORLD_SIZE" -eq 1 ]; then
    # Single-GPU training
    python train.py \
            --cuda \
            --dataset ${DATASET} \
            --root ${DATA_ROOT} \
            --model ${MODEL} \
            --batch_size ${BATCH_SIZE} \
            --img_size ${IMAGE_SIZE} \
            --wp_epoch ${WP_EPOCH} \
            --max_epoch ${MAX_EPOCH} \
            --eval_epoch ${EVAL_EPOCH} \
            --no_aug_epoch ${NO_AUG_EPOCH} \
            --resume ${RESUME} \
            --ema \
            --fp16 \
            --multi_scale
elif [[ $WORLD_SIZE -gt 1 && $WORLD_SIZE -le 8 ]]; then
    # Single-machine multi-GPU training with distributed data parallel
    python -m torch.distributed.run --nproc_per_node=${WORLD_SIZE} --master_port 1669 train.py \
            --cuda \
            -dist \
            --dataset ${DATASET} \
            --root ${DATA_ROOT} \
            --model ${MODEL} \
            --batch_size ${BATCH_SIZE} \
            --img_size ${IMAGE_SIZE} \
            --wp_epoch ${WP_EPOCH} \
            --max_epoch ${MAX_EPOCH} \
            --eval_epoch ${EVAL_EPOCH} \
            --no_aug_epoch ${NO_AUG_EPOCH} \
            --resume ${RESUME} \
            --ema \
            --fp16 \
            --multi_scale \
            --sybn
else
    echo "WORLD_SIZE is greater than 8, which implies multi-machine multi-GPU training; \
this mode is not currently supported by this script."
    exit 1
fi