Merge pull request #21 from anshuman23/dev
Added Keras example
Showing 2 changed files with 73 additions and 0 deletions.
@@ -0,0 +1,73 @@
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''

from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from keras import backend
import tensorflow as tf


def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = convert_variables_to_constants(session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph


batch_size = 128
num_classes = 10
epochs = 20

# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,), name="input"))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax', name="output"))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

frozen_graph = freeze_session(backend.get_session(), output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, "./", "mnist_mlp.pb", as_text=False)
Binary file not shown.
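The script above freezes the trained Keras session and writes the graph to mnist_mlp.pb, which appears to be the binary file added in this commit. As a rough sketch of how such a frozen graph could be loaded back with plain TensorFlow 1.x (not part of this commit): the tensor names "input_input:0" and "output/Softmax:0" are assumptions based on the layer names in the script and should be verified against the actual graph.

# Minimal sketch (assumed tensor names): load the frozen graph and run one prediction.
import numpy as np
import tensorflow as tf

# Read the serialized GraphDef from disk
with tf.gfile.GFile("mnist_mlp.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

# Import it into a fresh graph
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")

x = graph.get_tensor_by_name("input_input:0")      # assumed input placeholder name
y = graph.get_tensor_by_name("output/Softmax:0")   # assumed softmax output name

with tf.Session(graph=graph) as sess:
    sample = np.zeros((1, 784), dtype=np.float32)   # dummy flattened 28x28 image
    probs = sess.run(y, feed_dict={x: sample})
    print(probs.argmax())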