#!/bin/bash
# train.sh: launch commands for training FreeSeed, FreeNet, and DuDoFreeSeed.
# Common hyperparameters:
epochs=30                        # number of training epochs
dataset_shape=256                # CT image side length (images are square)
res_dir='/mnt/data_jixie1/clma'  # root directory for storing results (edit to your own path)
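
# Optional convenience (a sketch, not part of the original script): create the
# output roots up front in case main.py does not create them itself.
mkdir -p "$res_dir/freeseed/tb" "$res_dir/freeseed/ckpt"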
# Script for training FreeSeed (global skip defined in the trainer):
num_views=72
network='freeseed_0.5_1_5'
CUDA_VISIBLE_DEVICES="0" python -m torch.distributed.launch \
    --master_port 10021 --nproc_per_node 1 \
    main.py --epochs $epochs \
    --lr 1e-4 --optimizer 'adam' \
    --scheduler 'step' --step_size 10 --step_gamma 0.5 \
    --dataset_name 'aapm' --dataset_shape $dataset_shape \
    --num_views $num_views \
    --network $network \
    --net_dict "{'ratio_ginout':0.5,'global_skip':False}" \
    --use_mask True --soft_mask False \
    --loss 'l2' --trainer_mode 'train' \
    --tensorboard_root "$res_dir/freeseed/tb" \
    --tensorboard_dir "[$num_views]$network" \
    --checkpoint_root "$res_dir/freeseed/ckpt" \
    --checkpoint_dir "[$num_views]$network" \
    --dataset_path '/mnt/data_jixie1/clma/aapm_tr5410_te526' \
    --batch_size 2 --num_workers 4 --log_interval 200 \
    --use_tqdm # --use_wandb --wandb_project 'freeseed' --wandb_root "$res_dir/freeseed/wandb"
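
# Multi-GPU note (an assumption about this launcher setup, not verified here):
# to train on two GPUs, list both devices and raise --nproc_per_node to match, e.g.
#   CUDA_VISIBLE_DEVICES="0,1" python -m torch.distributed.launch --nproc_per_node 2 ...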
# Script for training FreeNet (global skip defined in the network):
num_views=72
network='freenet'
CUDA_VISIBLE_DEVICES="0" python -m torch.distributed.launch \
    --master_port 10020 --nproc_per_node 1 \
    main.py --epochs $epochs \
    --lr 1e-4 --optimizer 'adam' \
    --scheduler 'step' --step_size 10 --step_gamma 0.5 \
    --dataset_name 'aapm' --dataset_shape $dataset_shape \
    --num_views $num_views \
    --network $network \
    --net_dict "{'ratio_ginout':0.5,'global_skip':True}" \
    --use_mask True --soft_mask False \
    --loss 'l2' --trainer_mode 'train' \
    --tensorboard_root "$res_dir/freeseed/tb" \
    --tensorboard_dir "[$num_views]$network" \
    --checkpoint_root "$res_dir/freeseed/ckpt" \
    --checkpoint_dir "[$num_views]$network" \
    --dataset_path '/mnt/data_jixie1/clma/aapm_tr5410_te526' \
    --batch_size 2 --num_workers 4 --log_interval 200 \
    --use_tqdm # --use_wandb --wandb_project 'freeseed' --wandb_root "$res_dir/freeseed/wandb"
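
# Compatibility note: torch.distributed.launch is deprecated in recent PyTorch
# releases; torchrun is the documented replacement and accepts the same rendezvous
# flags used above, e.g. (assuming main.py reads LOCAL_RANK from the environment):
#   torchrun --master_port 10020 --nproc_per_node 1 main.py ...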
# Script for training DuDoFreeSeed (global skip defined in the network):
num_views=72
network='dudofreenet'
CUDA_VISIBLE_DEVICES="0" python -m torch.distributed.launch \
    --master_port 10023 --nproc_per_node 1 \
    main.py --epochs 20 \
    --lr 1e-4 --optimizer 'adam' \
    --pretrain_lr 1e-4 --pretrain_epochs 10 \
    --scheduler 'step' --step_size 10 --step_gamma 0.5 \
    --dataset_name 'aapm' --dataset_shape $dataset_shape \
    --num_views $num_views \
    --network $network \
    --net_dict "{'ratio_ginout':0.5,'mask_type':'bp-gaussian-mc'}" \
    --use_mask True --soft_mask False \
    --loss 'l2' --trainer_mode 'train' \
    --tensorboard_root "$res_dir/freeseed/tb" \
    --tensorboard_dir "[$num_views]$network" \
    --checkpoint_root "$res_dir/freeseed/ckpt" \
    --checkpoint_dir "[$num_views]$network" \
    --dataset_path '/mnt/data_jixie1/clma/aapm_tr5410_te526' \
    --batch_size 2 --num_workers 4 --log_interval 200 --save_epochs 1 \
    --use_tqdm # --use_wandb --wandb_project 'freeseed' --wandb_root "$res_dir/freeseed/wandb"
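
# To monitor any of the runs, point TensorBoard at the shared log root
# (assuming the tensorboard CLI is installed; uncomment to use):
# tensorboard --logdir "$res_dir/freeseed/tb"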