speech2text-tflearn.py (forked from pannous/tensorflow-speech-recognition)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import tflearn
import speech_data
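# Assumption: speech_data is the helper module from the parent pannous repo; it is expected
# to expose mfcc_batch_generator(batch_size), yielding batches of MFCC feature matrices
# paired with one-hot labels for the spoken digits.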
learning_rate = 0.0001
training_iters = 300000  # steps (not used below; the training loop runs indefinitely)
batch_size = 64
width = 20   # MFCC features per frame
height = 80  # (max) utterance length in frames
classes = 10  # spoken digits 0-9
batch = word_batch = speech_data.mfcc_batch_generator(batch_size)
X, Y = next(batch)
trainX, trainY = X, Y
testX, testY = X, Y  # overfit on the same batch for now; no separate test split yet
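# Each element of X is expected to be a width x height MFCC matrix and each element of Y
# a one-hot vector over the `classes` digits, matching the [None, width, height] input below.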
# Data preprocessing
# Sequence padding
# trainX = pad_sequences(trainX, maxlen=100, value=0.)
# testX = pad_sequences(testX, maxlen=100, value=0.)
# # Converting labels to binary vectors
# trainY = to_categorical(trainY, nb_classes=2)
# testY = to_categorical(testY, nb_classes=2)
# Network building
net = tflearn.input_data([None, width, height])  # one sample = width x height MFCC matrix
# net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)  # single LSTM layer with 128 units
net = tflearn.fully_connected(net, classes, activation='softmax')  # 10-way digit classifier
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                         loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
try:
    model.load("tflearn.lstm.model")  # resume from a previous checkpoint if one exists
except Exception:
    pass  # no saved checkpoint yet; start from freshly initialized weights
while 1:  # training_iters is ignored; train indefinitely and interrupt manually
    model.fit(trainX, trainY, n_epoch=100, validation_set=(testX, testY), show_metric=True,
              batch_size=batch_size)
    _y = model.predict(X)
    model.save("tflearn.lstm.model")  # checkpoint after each round of epochs
    print(_y)  # predicted class probabilities for the training batch
    print(Y)   # ground-truth one-hot labels for comparison
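# Usage (assumed): run the script directly; speech_data is expected to fetch or load the
# spoken-digit audio, and the loop above keeps overwriting tflearn.lstm.model until interrupted.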