#!/bin/bash
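# Sequential training for domain-incremental segmentation on Cityscapes:
# one initial step (step 0) followed by 20 incremental steps (1-1 task).
# set -e aborts the whole pipeline as soon as any step fails.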

set -e

start=$(date +%s)

START_DATE=$(date '+%Y-%m-%d')

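# Random master port in [9000, 9999] so that concurrent torch.distributed
# launches on the same machine do not collide.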
PORT=$((9000 + RANDOM % 1000))
GPU=0,1
NB_GPU=2

DATA_ROOT=/path/to/cityscapes


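# Experiment configuration. NAME labels the checkpoints and results files;
# --loss_kd 10.0 presumably sets the knowledge-distillation loss weight.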
DATASET=cityscapes_domain
TASK=1-1
NAME=MiB
METHOD=FT
OPTIONS="--checkpoint checkpoints/step/ --loss_kd 10.0"
NB_EPOCHS=50

SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"

RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
rm -f ${RESULTSFILE}

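# If running inside GNU screen, this escape sequence sets the window title.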
echo -ne "\ek${SCREENNAME}\e\\"

echo "Writing in ${RESULTSFILE}"


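# Step 0 trains on the initial domain with lr 0.01; steps 1-20 then adapt
# the model to each domain increment in turn with a lower lr of 0.001.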
| 34 | +CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.01 --epochs ${NB_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS} |
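# Note: torch.distributed.launch (used above and in the loop below) is
# deprecated in recent PyTorch releases. On a newer PyTorch the equivalent
# launch would look roughly like this hedged sketch, assuming run.py reads
# LOCAL_RANK from the environment instead of a --local_rank argument:
#   CUDA_VISIBLE_DEVICES=${GPU} torchrun --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py ...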
for STEP in $(seq 1 20); do
    CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step ${STEP} --lr 0.001 --epochs ${NB_EPOCHS} --method ${METHOD} --opt_level O1 ${OPTIONS}
done
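# Aggregate the per-step results accumulated in the CSV.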
python3 average_csv.py ${RESULTSFILE}

echo ${SCREENNAME}


end=$(date +%s)
runtime=$((end-start))
echo "Run in ${runtime}s"