# models.py
import torch


# using Glorot (Xavier) initialization
def init_weights(m):
    if isinstance(m, torch.nn.Conv1d):
        torch.nn.init.xavier_uniform_(m.weight.data)
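
# Note: init_weights is not called anywhere in this file; the intended usage
# is presumably `model.apply(init_weights)` after constructing one of the
# models defined below (see the smoke test at the end of the file).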


class CNN(torch.nn.Module):
    def __init__(self, channels, conv_kernels, conv_strides, conv_padding, pool_padding, num_classes=10):
        assert len(conv_kernels) == len(channels) == len(conv_strides) == len(conv_padding)
        super(CNN, self).__init__()
        # create conv blocks
        self.conv_blocks = torch.nn.ModuleList()
        prev_channel = 1
        for i in range(len(channels)):
            # add stacked conv layers
            block = []
            for conv_channel in channels[i]:
                block.append(torch.nn.Conv1d(in_channels=prev_channel, out_channels=conv_channel,
                                             kernel_size=conv_kernels[i], stride=conv_strides[i],
                                             padding=conv_padding[i]))
                prev_channel = conv_channel
            # add batch norm layer
            block.append(torch.nn.BatchNorm1d(prev_channel))
            # add ReLU
            block.append(torch.nn.ReLU())
            self.conv_blocks.append(torch.nn.Sequential(*block))
        # create pool blocks
        self.pool_blocks = torch.nn.ModuleList()
        for i in range(len(pool_padding)):
            # add max pool (drops the time dimension by a factor of 4)
            self.pool_blocks.append(torch.nn.MaxPool1d(kernel_size=4, stride=4, padding=pool_padding[i]))
        # global pooling
        self.global_pool = torch.nn.AdaptiveAvgPool1d(1)
        self.linear = torch.nn.Linear(prev_channel, num_classes)

    def forward(self, inwav):
        for i in range(len(self.conv_blocks)):
            # apply conv block
            inwav = self.conv_blocks[i](inwav)
            # apply max pool
            if i < len(self.pool_blocks):
                inwav = self.pool_blocks[i](inwav)
        # apply global pooling
        out = self.global_pool(inwav).squeeze()
        out = self.linear(out)
        return out.squeeze()
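
# Shape sketch for the forward pass (illustrative, assuming the m11 config
# below and a 32000-sample waveform): (B, 1, 32000) -> conv block 0 + pool ->
# (B, 64, 2000) -> further conv blocks + pools -> last conv block, no pool ->
# (B, 512, 32) -> global avg pool -> (B, 512) -> linear -> (B, 10).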


class ResBlock(torch.nn.Module):
    def __init__(self, prev_channel, channel, conv_kernel, conv_stride, conv_pad):
        super(ResBlock, self).__init__()
        self.res = torch.nn.Sequential(
            torch.nn.Conv1d(in_channels=prev_channel, out_channels=channel,
                            kernel_size=conv_kernel, stride=conv_stride, padding=conv_pad),
            torch.nn.BatchNorm1d(channel),
            torch.nn.ReLU(),
            torch.nn.Conv1d(in_channels=channel, out_channels=channel,
                            kernel_size=conv_kernel, stride=conv_stride, padding=conv_pad),
            torch.nn.BatchNorm1d(channel),
        )
        self.bn = torch.nn.BatchNorm1d(channel)
        self.relu = torch.nn.ReLU()
    def forward(self, x):
        identity = x
        x = self.res(x)
        if x.shape[1] == identity.shape[1]:
            x = x + identity
        # repeat the smaller tensor along the channel dim until it matches the bigger one
        elif x.shape[1] > identity.shape[1]:
            if x.shape[1] % identity.shape[1] == 0:
                x = x + identity.repeat(1, x.shape[1] // identity.shape[1], 1)
            else:
                raise RuntimeError("Channel dims in ResBlock need to be divisible by the previous channel dims!")
        else:
            if identity.shape[1] % x.shape[1] == 0:
                # avoid modifying the block input in place (in-place ops on it would break autograd)
                x = identity + x.repeat(1, identity.shape[1] // x.shape[1], 1)
            else:
                raise RuntimeError("Channel dims in ResBlock need to be divisible by the previous channel dims!")
        x = self.bn(x)
        x = self.relu(x)
        return x
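
# Shape sketch (illustrative, not from the original file): for an input of
# shape (batch, 48, T), ResBlock(48, 96, conv_kernel=3, conv_stride=1,
# conv_pad=1) maps the residual path to (batch, 96, T) and repeats the
# identity 96 // 48 = 2 times along the channel dimension before the
# addition, so the block output is (batch, 96, T).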


class CNNRes(torch.nn.Module):
    def __init__(self, channels, conv_kernels, conv_strides, conv_padding, pool_padding, num_classes=10):
        assert len(conv_kernels) == len(channels) == len(conv_strides) == len(conv_padding)
        super(CNNRes, self).__init__()
        # create conv block
        prev_channel = 1
        self.conv_block = torch.nn.Sequential(
            torch.nn.Conv1d(in_channels=prev_channel, out_channels=channels[0][0],
                            kernel_size=conv_kernels[0], stride=conv_strides[0], padding=conv_padding[0]),
            # add batch norm layer
            torch.nn.BatchNorm1d(channels[0][0]),
            # add ReLU
            torch.nn.ReLU(),
            # add max pool
            torch.nn.MaxPool1d(kernel_size=4, stride=4, padding=pool_padding[0]),
        )
        # create res blocks
        prev_channel = channels[0][0]
        self.res_blocks = torch.nn.ModuleList()
        for i in range(1, len(channels)):
            # add stacked res layers
            block = []
            for conv_channel in channels[i]:
                block.append(ResBlock(prev_channel, conv_channel, conv_kernels[i], conv_strides[i], conv_padding[i]))
                prev_channel = conv_channel
            self.res_blocks.append(torch.nn.Sequential(*block))
        # create pool blocks
        self.pool_blocks = torch.nn.ModuleList()
        for i in range(1, len(pool_padding)):
            # add max pool (drops the time dimension by a factor of 4)
            self.pool_blocks.append(torch.nn.MaxPool1d(kernel_size=4, stride=4, padding=pool_padding[i]))
        # global pooling
        self.global_pool = torch.nn.AdaptiveAvgPool1d(1)
        self.linear = torch.nn.Linear(prev_channel, num_classes)

    def forward(self, inwav):
        inwav = self.conv_block(inwav)
        for i in range(len(self.res_blocks)):
            # apply res block
            inwav = self.res_blocks[i](inwav)
            # apply max pool
            if i < len(self.pool_blocks):
                inwav = self.pool_blocks[i](inwav)
        # apply global pooling
        out = self.global_pool(inwav).squeeze()
        out = self.linear(out)
        return out.squeeze()


m3 = CNN(channels=[[256], [256]],
         conv_kernels=[80, 3],
         conv_strides=[4, 1],
         conv_padding=[38, 1],
         pool_padding=[0, 0])

m5 = CNN(channels=[[128], [128], [256], [512]],
         conv_kernels=[80, 3, 3, 3],
         conv_strides=[4, 1, 1, 1],
         conv_padding=[38, 1, 1, 1],
         pool_padding=[0, 0, 0, 2])

m11 = CNN(channels=[[64], [64]*2, [128]*2, [256]*3, [512]*2],
          conv_kernels=[80, 3, 3, 3, 3],
          conv_strides=[4, 1, 1, 1, 1],
          conv_padding=[38, 1, 1, 1, 1],
          pool_padding=[0, 0, 0, 2])

m18 = CNN(channels=[[64], [64]*4, [128]*4, [256]*4, [512]*4],
          conv_kernels=[80, 3, 3, 3, 3],
          conv_strides=[4, 1, 1, 1, 1],
          conv_padding=[38, 1, 1, 1, 1],
          pool_padding=[0, 0, 0, 2])

m34_res = CNNRes(channels=[[48], [48]*3, [96]*4, [192]*6, [384]*3],
                 conv_kernels=[80, 3, 3, 3, 3],
                 conv_strides=[4, 1, 1, 1, 1],
                 conv_padding=[38, 1, 1, 1, 1],
                 pool_padding=[0, 0, 0, 2])
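
# Minimal smoke test (a sketch added here, not part of the original file):
# it assumes raw waveform input of shape (batch, 1, 32000), i.e. a
# single-channel signal of 32000 samples, applies init_weights, and checks
# that the models produce (batch, num_classes) logits.
if __name__ == "__main__":
    dummy = torch.randn(4, 1, 32000)  # assumed input length; any length that survives the pooling works
    for name, model in [("m11", m11), ("m34_res", m34_res)]:
        model.apply(init_weights)
        model.eval()
        with torch.no_grad():
            logits = model(dummy)
        print(name, tuple(logits.shape))  # expected: (4, 10)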