Commit 8ca5d66

Add script ade 100-50.
1 parent af895b4 commit 8ca5d66

File tree

1 file changed: 52 additions, 0 deletions


scripts/ade/plop_ade_100-50.sh

Lines changed: 52 additions & 0 deletions
#!/bin/bash

set -e

start=$(date +%s)
START_DATE=$(date '+%Y-%m-%d')

# Pick a random master port in [9000, 9999] to avoid collisions with other runs.
PORT=$((9000 + RANDOM % 1000))
GPU=0,1
NB_GPU=2
DATA_ROOT=/path/to/my/ade

DATASET=ade
TASK=100-10
NAME=PLOP
METHOD=FT
OPTIONS="--checkpoint checkpoints/step/ --pod local --pod_factor 0.001 --pod_logits --pseudo entropy --threshold 0.001 --classif_adaptive_factor --init_balanced"
SCREENNAME="${DATASET}_${TASK}_${NAME} On GPUs ${GPU}"

RESULTSFILE=results/${START_DATE}_${DATASET}_${TASK}_${NAME}.csv
rm -f "${RESULTSFILE}"
# Set the GNU screen window title to the run name.
echo -ne "\ek${SCREENNAME}\e\\"

echo "Writing in ${RESULTSFILE}"
# If you already trained the model for the first step, you can re-use those
# weights in order to skip this initial step, giving faster iteration on your model.
# Set this variable to the weights path:
# FIRSTMODEL=/path/to/my/first/weights
# Then, for the first step, append these options:
# --ckpt ${FIRSTMODEL} --test
# And for the second step, this option:
# --step_ckpt ${FIRSTMODEL}
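# For illustration only (a sketch assuming FIRSTMODEL is set as above), the
# first two launches below would then become:
#   CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch ... --step 0 ... --ckpt ${FIRSTMODEL} --test
#   CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch ... --step 1 ... --step_ckpt ${FIRSTMODEL}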
# Step 0: train on the initial classes with a higher learning rate.
CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 0 --lr 0.01 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS}
# Steps 1-5: incremental steps with a lower learning rate; --pod_options passes a
# JSON config (quotes are backslash-escaped so it stays a single shell argument).
CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 1 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 2 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 3 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 4 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
CUDA_VISIBLE_DEVICES=${GPU} python3 -m torch.distributed.launch --master_port ${PORT} --nproc_per_node=${NB_GPU} run.py --date ${START_DATE} --data_root ${DATA_ROOT} --overlap --batch_size 12 --dataset ${DATASET} --name ${NAME} --task ${TASK} --step 5 --lr 0.001 --epochs 60 --method ${METHOD} --opt_level O1 ${OPTIONS} --pod_options "{\"switch\": {\"after\": {\"extra_channels\": \"sum\", \"factor\": 0.00001, \"type\": \"local\"}}}"
# Average the metrics written to the results CSV.
python3 average_csv.py "${RESULTSFILE}"
echo "${SCREENNAME}"

end=$(date +%s)
runtime=$((end-start))
echo "Run in ${runtime}s"
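For reference, a minimal usage sketch: the script is self-contained, so from the repository root (assuming run.py and average_csv.py live there, as the relative paths suggest) it can be launched with:

bash scripts/ade/plop_ade_100-50.sh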
