import os
import sys
import warnings

# Select the visible GPUs before torch can initialize CUDA.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # e.g. "0, 1, 2, 3"

import torch
from torch import nn
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import DataLoader

from loader import *
from models.model import MHA_UNet
from dataset.npy_datasets import NPY_datasets
from engine import *
from utils import *
from configs.config_setting import setting_config

warnings.filterwarnings("ignore")
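
# Overall flow: create logger -> seed + GPU init -> build ISIC train/val/test
# loaders -> wrap MHA_UNet in DataParallel -> loss/optimizer/scheduler/AMP
# scaler -> optionally resume from checkpoints/latest.pth -> train/val loop
# (best.pth on a new lowest val loss, latest.pth every epoch) -> test the
# best weights.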
def main(config):
    print('#----------Creating logger----------#')
    sys.path.append(config.work_dir + '/')
    log_dir = os.path.join(config.work_dir, 'log')
    checkpoint_dir = os.path.join(config.work_dir, 'checkpoints')
    resume_model = os.path.join(checkpoint_dir, 'latest.pth')
    outputs = os.path.join(config.work_dir, 'outputs')
    os.makedirs(checkpoint_dir, exist_ok=True)
    os.makedirs(outputs, exist_ok=True)

    global logger
    logger = get_logger('train', log_dir)
    log_config_info(config, logger)

    print('#----------GPU init----------#')
    set_seed(config.seed)
    gpu_ids = [0]  # e.g. [0, 1, 2, 3]
    torch.cuda.empty_cache()

    print('#----------Preparing dataset----------#')
    train_dataset = isic_loader(path_Data=config.data_path, train=True)
    train_loader = DataLoader(train_dataset,
                              batch_size=config.batch_size,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=config.num_workers)
    val_dataset = isic_loader(path_Data=config.data_path, train=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=1,
                            shuffle=False,
                            pin_memory=True,
                            num_workers=config.num_workers,
                            drop_last=True)
    test_dataset = isic_loader(path_Data=config.data_path, train=False, Test=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=False,
                             pin_memory=True,
                             num_workers=config.num_workers,
                             drop_last=True)

    print('#----------Preparing Models----------#')
    # model_cfg = config.model_config
    model = MHA_UNet()
    model = torch.nn.DataParallel(model.cuda(), device_ids=gpu_ids, output_device=gpu_ids[0])

    print('#----------Preparing loss, opt, sch and amp----------#')
    criterion = config.criterion
    optimizer = get_optimizer(config, model)
    scheduler = get_scheduler(config, optimizer)
    scaler = GradScaler()
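
    # Note: `scaler` is passed to train_one_epoch below. The engine is expected
    # to follow the usual torch.cuda.amp pattern (an assumption here; see
    # engine.py for the actual implementation), roughly:
    #     with autocast():
    #         preds = model(images)
    #         loss = criterion(preds, targets)
    #     scaler.scale(loss).backward()
    #     scaler.step(optimizer)
    #     scaler.update()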

    print('#----------Set other params----------#')
    min_loss = 999
    start_epoch = 1
    min_epoch = 1

    # Resume training from the latest checkpoint if one exists.
    if os.path.exists(resume_model):
        print('#----------Resume Model and Other params----------#')
        checkpoint = torch.load(resume_model, map_location=torch.device('cpu'))
        model.module.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        saved_epoch = checkpoint['epoch']
        start_epoch += saved_epoch
        min_loss, min_epoch, loss = checkpoint['min_loss'], checkpoint['min_epoch'], checkpoint['loss']

        log_info = (f'resuming model from {resume_model}. resume_epoch: {saved_epoch}, '
                    f'min_loss: {min_loss:.4f}, min_epoch: {min_epoch}, loss: {loss:.4f}')
        logger.info(log_info)

    print('#----------Training----------#')
    for epoch in range(start_epoch, config.epochs + 1):
        torch.cuda.empty_cache()

        train_one_epoch(
            train_loader,
            model,
            criterion,
            optimizer,
            scheduler,
            epoch,
            logger,
            config,
            scaler=scaler
        )

        loss = val_one_epoch(
            val_loader,
            model,
            criterion,
            epoch,
            logger,
            config
        )

        # Keep the weights with the lowest validation loss seen so far.
        if loss < min_loss:
            torch.save(model.module.state_dict(), os.path.join(checkpoint_dir, 'best.pth'))
            min_loss = loss
            min_epoch = epoch

        # Refresh the resumable checkpoint every epoch.
        torch.save(
            {
                'epoch': epoch,
                'min_loss': min_loss,
                'min_epoch': min_epoch,
                'loss': loss,
                'model_state_dict': model.module.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
            }, os.path.join(checkpoint_dir, 'latest.pth'))

    if os.path.exists(os.path.join(checkpoint_dir, 'best.pth')):
        print('#----------Testing_best----------#')
        best_weight = torch.load(os.path.join(checkpoint_dir, 'best.pth'),
                                 map_location=torch.device('cpu'))
        model.module.load_state_dict(best_weight)
        loss = test_one_epoch(
            test_loader,
            model,
            criterion,
            logger,
            config,
        )
        # Rename best.pth so the best epoch and loss are recorded in the filename.
        os.rename(
            os.path.join(checkpoint_dir, 'best.pth'),
            os.path.join(checkpoint_dir, f'best-epoch{min_epoch}-loss{min_loss:.4f}.pth')
        )

if __name__ == '__main__':
    config = setting_config
    main(config)
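
# Usage: python train_cou.py
# All run settings (work_dir, data_path, batch_size, num_workers, epochs, seed,
# criterion, optimizer, scheduler) come from setting_config in
# configs/config_setting.py.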