-
Notifications
You must be signed in to change notification settings - Fork 0
/
WaveUNet_with_skip2.py
167 lines (126 loc) · 7.23 KB
/
WaveUNet_with_skip2.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
import os  # stdlib; lets the Comet key come from the environment

# Comet ML experiment tracking.
# NOTE(security): the API key was hard-coded in source; prefer the
# COMET_API_KEY environment variable. The original literal is kept as a
# fallback so existing runs keep working, but a committed key should be
# considered leaked and rotated.
experiment = Experiment(api_key=os.environ.get("COMET_API_KEY",
                                               "OKhPlin1BVQJFzniHu1f3K1t3"),
                        project_name="micplacementwavenet", workspace="cm5409a")

# Training hyper-parameters.
batch_size = 32
steps_per_epoch = 1000  # only used by the (commented-out) generator pipeline
num_epochs = 10

# Generator-based pipeline kept for reference; the script currently trains
# directly on in-memory arrays (see model.fit at the bottom of the file).
# gen_obj_train = TrainGenerator(x_train, y_train, win_size, batch_size)
# gen_obj_val = TrainGenerator(x_val, y_val, win_size, batch_size)
# train_gen = gen_obj_train.generator()
# val_gen = gen_obj_val.generator()
# init_x, init_y = next(train_gen)

Fc = 24                  # num filters per layer (which is multiplied by depth)
sources_to_estimate = 1  # number of output channels the network predicts

# Sample input/target rows (assumes x_train / y_train are 2-D arrays of
# shape (samples, time) — TODO confirm against the loading code above).
init_x = x_train[0]
init_y = y_train[0]
input_shape = (x_train.shape[1], 1)   # (time steps, channels)
output_shape = (y_train.shape[1], 1)
main_dim = input_shape[1]  # NOTE(review): appears unused below — candidate for removal
# INPUT / ENCODER =============================================================
# Ten downsampling stages: Conv1D (kernel 15) -> LeakyReLU -> MaxPool(2).
# Filter count grows linearly with depth (Fc * depth) while the time axis is
# halved at each stage. The post-pool activation of every stage is bound to
# its original name (downsample_0 .. downsample_9) because the decoder below
# concatenates them as skip connections.
# NOTE: the original code passed input_shape=(1, init_x.shape[1]) to every
# Conv1D; in the Keras functional API that argument is ignored once a layer
# is called on a tensor (the Input layer already fixes the shape), so it is
# dropped here.
input_layer = Input(shape=input_shape)

_enc = input_layer
_encoder_outputs = []
for _depth in range(1, 11):
    _enc = Conv1D(filters=Fc * _depth, kernel_size=15, padding='same')(_enc)
    _enc = LeakyReLU(alpha=0.05)(_enc)      # ACTIVATION
    _enc = MaxPooling1D(pool_size=2)(_enc)  # DOWNSAMPLE (halve time axis)
    _encoder_outputs.append(_enc)

# Preserve the individual names the decoder's skip connections refer to.
(downsample_0, downsample_1, downsample_2, downsample_3, downsample_4,
 downsample_5, downsample_6, downsample_7, downsample_8,
 downsample_9) = _encoder_outputs
# =====================================================
# consider extending this so that shape in center reaches 4 or even 2 (12 layer)
# DECODER =====================================================================
# Mirror of the encoder: at each level, concatenate the skip connection from
# the matching encoder stage, then Conv1D (kernel 5; kernel 1 at the last
# level, as in the original) -> LeakyReLU -> UpSampling(2). After ten x2
# upsamplings the time axis is back to the input length.
# BUG FIX: the original level-6 stage read
#     upsample_5 = concatenate([upsample_7, downsample_6])
#     upsample_6 = Conv1D(...)(upsample_6)
# which referenced upsample_6 before it was ever assigned (NameError at
# graph-build time) and clobbered upsample_5. The concatenation result is
# now correctly fed into the level-6 convolution.
_dec = Conv1D(filters=Fc * 10, kernel_size=5, padding='same')(downsample_9)
_dec = LeakyReLU(alpha=0.05)(_dec)  # ACTIVATION
_dec = UpSampling1D(size=2)(_dec)   # UPSAMPLE

_skips = [downsample_8, downsample_7, downsample_6, downsample_5, downsample_4,
          downsample_3, downsample_2, downsample_1, downsample_0]
for _level, _skip in zip(range(9, 0, -1), _skips):
    _dec = concatenate([_dec, _skip])  # CONCATENATE SKIP
    _dec = Conv1D(filters=Fc * _level,
                  # original used kernel_size=1 for the final (Fc*1) level
                  kernel_size=5 if _level > 1 else 1,
                  padding='same')(_dec)
    _dec = LeakyReLU(alpha=0.05)(_dec)  # ACTIVATION
    _dec = UpSampling1D(size=2)(_dec)   # UPSAMPLE
upsample_0 = _dec

# OUTPUT: 1x1 convolution down to the number of estimated sources.
output_layer = Conv1D(filters=sources_to_estimate, kernel_size=1,
                      padding='same')(upsample_0)
# output_layer = Dense(output_shape[1], activation='tanh')(upsample_0)

model = Model(input_layer, output_layer)
# NOTE(review): 'accuracy' is not a meaningful metric for an MSE regression
# target; consider 'mae' instead (kept as-is to preserve logged metrics).
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
model.summary()
# Train directly on the in-memory arrays; the generator-based pipeline is
# kept commented out for reference.
# cb = callback(gen_obj_val, model)
# result = model.fit_generator(train_gen, steps_per_epoch=steps_per_epoch,
# epochs=num_epochs, validation_data=val_gen, validation_steps=100, verbose=1,
# callbacks=[cb])
result = model.fit(
    x=x_train,
    y=y_train,
    validation_data=(x_val, y_val),
    epochs=num_epochs,
    batch_size=batch_size,
    shuffle=True,
)