#!/bin/bash
# train.sh — launch single-GPU or single-node multi-GPU (DDP) training.
  1. # Args parameters
  2. MODEL=$1
  3. DATASET=$2
  4. DATASET_ROOT=$3
  5. BATCH_SIZE=$4
  6. WORLD_SIZE=$5
  7. MASTER_PORT=$6
  8. RESUME=$7
  9. # MODEL setting
  10. IMAGE_SIZE=640
  11. FIND_UNUSED_PARAMS=False
  12. if [[ $MODEL == *"yolov8"* ]]; then
  13. # Epoch setting
  14. MAX_EPOCH=500
  15. WP_EPOCH=3
  16. EVAL_EPOCH=10
  17. NO_AUG_EPOCH=20
  18. elif [[ $MODEL == *"yolox"* ]]; then
  19. # Epoch setting
  20. MAX_EPOCH=300
  21. WP_EPOCH=3
  22. EVAL_EPOCH=10
  23. NO_AUG_EPOCH=20
  24. elif [[ $MODEL == *"yolov7"* ]]; then
  25. # Epoch setting
  26. MAX_EPOCH=300
  27. WP_EPOCH=3
  28. EVAL_EPOCH=10
  29. NO_AUG_EPOCH=20
  30. elif [[ $MODEL == *"yolov5"* ]]; then
  31. # Epoch setting
  32. MAX_EPOCH=300
  33. WP_EPOCH=3
  34. EVAL_EPOCH=10
  35. NO_AUG_EPOCH=20
  36. elif [[ $MODEL == *"yolov4"* ]]; then
  37. # Epoch setting
  38. MAX_EPOCH=300
  39. WP_EPOCH=3
  40. EVAL_EPOCH=10
  41. NO_AUG_EPOCH=20
  42. elif [[ $MODEL == *"yolov3"* ]]; then
  43. # Epoch setting
  44. MAX_EPOCH=300
  45. WP_EPOCH=3
  46. EVAL_EPOCH=10
  47. NO_AUG_EPOCH=20
  48. else
  49. # Epoch setting
  50. MAX_EPOCH=150
  51. WP_EPOCH=3
  52. EVAL_EPOCH=10
  53. NO_AUG_EPOCH=10
  54. fi
  55. # -------------------------- Train Pipeline --------------------------
  56. if [ $WORLD_SIZE == 1 ]; then
  57. python train.py \
  58. --cuda \
  59. --dataset ${DATASET} \
  60. --root ${DATASET_ROOT} \
  61. --model ${MODEL} \
  62. --batch_size ${BATCH_SIZE} \
  63. --img_size ${IMAGE_SIZE} \
  64. --wp_epoch ${WP_EPOCH} \
  65. --max_epoch ${MAX_EPOCH} \
  66. --eval_epoch ${EVAL_EPOCH} \
  67. --no_aug_epoch ${NO_AUG_EPOCH} \
  68. --resume ${RESUME} \
  69. --ema \
  70. --fp16 \
  71. --find_unused_parameters ${FIND_UNUSED_PARAMS} \
  72. --multi_scale
  73. elif [[ $WORLD_SIZE -gt 1 && $WORLD_SIZE -le 8 ]]; then
  74. python -m torch.distributed.run --nproc_per_node=${WORLD_SIZE} --master_port ${MASTER_PORT} train.py \
  75. --cuda \
  76. -dist \
  77. --dataset ${DATASET} \
  78. --root ${DATASET_ROOT} \
  79. --model ${MODEL} \
  80. --batch_size ${BATCH_SIZE} \
  81. --img_size ${IMAGE_SIZE} \
  82. --wp_epoch ${WP_EPOCH} \
  83. --max_epoch ${MAX_EPOCH} \
  84. --eval_epoch ${EVAL_EPOCH} \
  85. --no_aug_epoch ${NO_AUG_EPOCH} \
  86. --resume ${RESUME} \
  87. --ema \
  88. --fp16 \
  89. --find_unused_parameters ${FIND_UNUSED_PARAMS} \
  90. --multi_scale \
  91. --sybn
  92. else
  93. echo "The WORLD_SIZE is set to a value greater than 8, indicating the use of multi-machine \
  94. multi-card training mode, which is currently unsupported."
  95. exit 1
  96. fi