# train.py — FCN semantic-segmentation training script.
# (Recovered from a GitHub page scrape; the source repository was archived
# on Jun 24, 2023 and is now read-only. Original file: 87 lines, 3.42 KB.)
import model, utils
import torch
import torch.nn as nn
import torch.optim as optim
class FCN():
    """Trainer/evaluator for a Fully Convolutional Network segmentation model.

    The network is built by ``model.fcn`` and per-class true/false
    positive/negative counts come from ``utils.mIoU`` — both are sibling
    modules of this repository (not shown here).
    """

    def __init__(self, num=8, num_classes=21, ignore_index=-1, gpu_id=0, print_freq=10, epoch_print=10):
        # num          : FCN variant selector passed to model.fcn
        #                (presumably the stride, e.g. 8 -> FCN-8s — confirm in model.py)
        # num_classes  : number of segmentation classes (21 matches PASCAL VOC)
        # ignore_index : target value excluded from the cross-entropy loss
        # gpu_id       : CUDA device the model and loss are placed on
        # print_freq   : evaluate on test_data every `print_freq` iterations
        # epoch_print  : report progress only on every `epoch_print`-th epoch
        self.num = num
        self.num_classes = num_classes
        self.ignore_index = ignore_index
        self.gpu = gpu_id
        self.print_freq = print_freq
        self.epoch_print = epoch_print

        torch.cuda.set_device(self.gpu)
        self.loss_function = nn.CrossEntropyLoss(ignore_index=self.ignore_index).cuda(self.gpu)
        self.model = model.fcn(self.num, self.num_classes).cuda(self.gpu)
        # Guards the mIoU denominator against 0/0 for classes absent from
        # the test set (see test()).
        self.eps = 1e-10
        self.best_mIoU = 0

    def train(self, train_data, test_data, save=False, epochs=74, lr=0.01, momentum=0.9, weight_decay=0.0005):
        """Train the model, periodically evaluating and tracking the best mIoU.

        train_data / test_data : iterables yielding (image, label) batches;
            labels are expected as (N, 1, H, W) index maps — see the view below.
        save : if True, write the model weights whenever a new best test
            mIoU is reached.
        """
        optimizer = optim.SGD(self.model.parameters(), lr, momentum=momentum, weight_decay=weight_decay)

        self.model.train()
        for epoch in range(epochs):
            if epoch % self.epoch_print == 0: print('Epoch {} Started...'.format(epoch+1))
            for i, (X, y) in enumerate(train_data):
                # Collapse the channel dim: assumes labels arrive as
                # (N, 1, H, W) — TODO confirm against the data loader.
                n, c, h, w = y.shape
                y = y.view(n, h, w).type(torch.LongTensor)
                X, y = X.cuda(self.gpu, non_blocking=True), y.cuda(self.gpu, non_blocking=True)

                output = self.model(X)
                loss = self.loss_function(output, y)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                if (i+1) % self.print_freq == 0:
                    test_mIoU, test_loss = self.test(test_data)
                    # NOTE(review): best-model tracking only runs on
                    # reporting epochs; on other epochs the evaluation
                    # result is discarded. Kept as-is to preserve behavior.
                    if epoch % self.epoch_print == 0:
                        state = ('Iteration : {} - Train Loss : {:.6f}, Test Loss : {:.6f}, '
                                 'Test mIoU : {:.4f}'.format(i+1, loss.item(), test_loss, 100*test_mIoU))
                        if test_mIoU > self.best_mIoU:
                            print()
                            print('*' * 35, 'Best mIoU Updated', '*' * 35)
                            print(state)
                            self.best_mIoU = test_mIoU
                            if save:
                                torch.save(self.model.state_dict(), './FCN_' + str(self.num) + 's_' +'best.pt')
                                print('Saved Best Model')
                            print()
                        else:
                            print(state)

    def test(self, test_data):
        """Evaluate on test_data; return (mean IoU, mean loss).

        Accumulates per-class true-positive / false-positive / false-negative
        pixel counts over the whole test set, then averages IoU over classes.
        Restores the model to train mode before returning.
        """
        tps = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        fps = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        fns = torch.zeros(self.num_classes).cuda(self.gpu, non_blocking=True)
        losses = []

        self.model.eval()
        with torch.no_grad():
            for i, (X, y) in enumerate(test_data):
                # Same (N, 1, H, W) -> (N, H, W) label reshape as train().
                n, c, h, w = y.shape
                y = y.view(n, h, w).type(torch.LongTensor)
                X, y = X.cuda(self.gpu, non_blocking=True), y.cuda(self.gpu, non_blocking=True)

                output = self.model(X)
                loss = self.loss_function(output, y)
                losses.append(loss.item())

                tp, fp, fn = utils.mIoU(output, y, self.num_classes, self.gpu)
                tps += tp
                fps += fp
                fns += fn
        self.model.train()

        # BUGFIX: self.eps was defined but never used — for any class with
        # zero tp/fp/fn the division was 0/0 = NaN, poisoning the mean.
        # Adding eps makes such classes contribute 0 to the sum instead.
        mIoU = torch.sum(tps / (tps + fps + fns + self.eps)) / self.num_classes
        return mIoU, sum(losses) / len(losses)