neural_net.py
import torch
from torch import nn
from torch.distributions.multivariate_normal import MultivariateNormal
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm


def init_weights(m):
    """Xavier-initialize linear layers; intended for use with net.apply(init_weights)."""
    if isinstance(m, nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)
def FFLayer(input_size, output_size):
    """A single feed-forward block: linear layer followed by ReLU."""
    return nn.Sequential(
        nn.Linear(input_size, output_size),
        nn.ReLU()
    )


def FFLayers(input_size, output_size, hidden_size, hidden_count):
    """Feed-forward net: an input block, `hidden_count` hidden blocks, and a linear output layer."""
    return nn.Sequential(
        FFLayer(input_size, hidden_size),
        *[FFLayer(hidden_size, hidden_size) for _ in range(hidden_count)],
        nn.Linear(hidden_size, output_size)
    )
class FFDropoutLayer(torch.nn.Module):
    """Feed-forward block with dropout: linear -> dropout -> ReLU."""

    def __init__(self, input_size, output_size, dropout_p=.2):
        super(FFDropoutLayer, self).__init__()
        self.linear = nn.Linear(input_size, output_size)
        self.dropout_p = dropout_p

    def forward(self, x):
        x = self.linear(x)
        # Dropout is only applied while the module is in training mode.
        x = F.dropout(x, p=self.dropout_p, training=self.training)
        return F.relu(x)


def FFDropoutLayers(input_size, output_size, hidden_size, hidden_count, dropout_p=.2):
    """Like FFLayers, but each block applies dropout with probability `dropout_p`."""
    return nn.Sequential(
        FFDropoutLayer(input_size, hidden_size, dropout_p=dropout_p),
        *[FFDropoutLayer(hidden_size, hidden_size, dropout_p=dropout_p) for _ in range(hidden_count)],
        nn.Linear(hidden_size, output_size)
    )
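

# Illustrative sketch (not part of the original module): test() below accepts a
# `predict` callable and keeps the network in training mode, which suggests a
# Monte Carlo dropout style evaluation. The name `mc_dropout_predict` and the
# `samples` parameter are assumptions, shown only as one way such a callable
# could look.
def mc_dropout_predict(x, net, samples=10):
    """Average class probabilities over several stochastic forward passes."""
    with torch.no_grad():
        probs = torch.stack([F.softmax(net(x.float()), dim=1) for _ in range(samples)])
    return probs.mean(dim=0)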
def train(train_loader, net, criterion, optimizer, scheduler):
    """Run one training epoch and return the per-batch losses.

    Note: `scheduler` is accepted but is not stepped inside this function.
    """
    train_losses = []
    for data, target in tqdm(train_loader):
        # Flatten each sample to a single feature vector.
        data, target = data.view(len(data), -1).float(), target
        optimizer.zero_grad()
        output = net(data)
        loss = criterion(output, target)
        train_losses.append(loss.item())
        loss.backward()
        optimizer.step()
    return train_losses
def test(test_loader, predict, net, criterion):
    """Return (mean accuracy, mean loss) over the test set.

    Note: the network is kept in training mode, so dropout stays active during
    evaluation (presumably Monte Carlo dropout); `predict` produces the class
    scores while `criterion` is applied to a plain forward pass.
    """
    scores = []
    losses = []
    net.train()
    for data, target in test_loader:
        output = predict(data.view(len(data), -1), net)
        predictions = torch.argmax(output.data, 1)
        scores.append((predictions == target).float().mean().detach().item())
        loss = criterion(net(data.view(len(data), -1)), target)
        losses.append(loss.detach().item())
    return sum(scores) / len(test_loader), sum(losses) / len(test_loader)
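

# Illustrative usage sketch (not part of the original module). The dataset here
# is random placeholder data with MNIST-like dimensions (784 features, 10
# classes); the layer sizes, optimizer, and scheduler choices are assumptions
# made only to show how the helpers above could be wired together.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset

    X = torch.randn(512, 784)
    y = torch.randint(0, 10, (512,))
    train_loader = DataLoader(TensorDataset(X[:400], y[:400]), batch_size=32, shuffle=True)
    test_loader = DataLoader(TensorDataset(X[400:], y[400:]), batch_size=32)

    net = FFDropoutLayers(input_size=784, output_size=10, hidden_size=128, hidden_count=2)
    net.apply(init_weights)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)

    for epoch in range(3):
        losses = train(train_loader, net, criterion, optimizer, scheduler)
        scheduler.step()  # stepped here because train() does not step it
        accuracy, test_loss = test(test_loader, mc_dropout_predict, net, criterion)
        print(f"epoch {epoch}: train loss {sum(losses)/len(losses):.4f}, "
              f"test acc {accuracy:.3f}, test loss {test_loss:.4f}")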