train.py

import torch
import torch.nn.functional as F


def train(loader, model, optimizer, epoch, cuda, log_interval, verbose=True):
    """Run one training epoch; return the average loss per sample."""
    model.train()
    global_epoch_loss = 0
    for batch_idx, (data, target) in enumerate(loader):
        if cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        global_epoch_loss += loss.item() * len(data)  # batch mean -> batch sum
        if verbose and batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(loader.dataset),
                100. * batch_idx / len(loader), loss.item()))
    return global_epoch_loss / len(loader.dataset)


def test(loader, model, cuda, verbose=True):
    """Evaluate the model; return the average test loss per sample."""
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in loader:
            if cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(loader.dataset)
    if verbose:
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(loader.dataset),
            100. * correct / len(loader.dataset)))
    return test_loss
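

# Usage sketch (illustrative addition, not part of the original file): a minimal
# driver for the two helpers above. It assumes torchvision's MNIST dataset and a
# throwaway linear classifier; all hyperparameters here are placeholders.
if __name__ == '__main__':
    import torch.optim as optim
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms

    use_cuda = torch.cuda.is_available()
    train_loader = DataLoader(
        datasets.MNIST('data', train=True, download=True,
                       transform=transforms.ToTensor()),
        batch_size=64, shuffle=True)
    test_loader = DataLoader(
        datasets.MNIST('data', train=False, transform=transforms.ToTensor()),
        batch_size=1000)

    # F.nll_loss expects log-probabilities, so the model ends in LogSoftmax.
    model = torch.nn.Sequential(
        torch.nn.Flatten(),
        torch.nn.Linear(28 * 28, 10),
        torch.nn.LogSoftmax(dim=1),
    )
    if use_cuda:
        model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

    for epoch in range(1, 11):
        train(train_loader, model, optimizer, epoch, use_cuda, log_interval=100)
        test(test_loader, model, use_cuda)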