-
Notifications
You must be signed in to change notification settings - Fork 199
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Burak Bayramli
committed
Mar 22, 2018
1 parent
c9a888e
commit 998c748
Showing
53 changed files
with
4,754 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
""" | ||
Utility functions to avoid warnings while testing both Keras 1 and 2. | ||
""" | ||
import keras | ||
|
||
# True when the installed Keras major version is 2 or newer; the wrappers
# below use this flag to pick between Keras 1 and Keras 2 argument names.
keras_2 = int(keras.__version__.split(".")[0]) > 1  # Keras > 1
|
||
|
||
def fit_generator(model, generator, epochs, steps_per_epoch):
    """Call ``model.fit_generator`` with the keyword names of the installed Keras.

    Keras 2 renamed ``nb_epoch``/``samples_per_epoch`` to
    ``epochs``/``steps_per_epoch``; this wrapper hides the difference.

    Returns the Keras ``History`` object (the original version discarded it,
    inconsistently with the sibling ``fit`` wrapper).
    """
    if keras_2:
        return model.fit_generator(generator, epochs=epochs,
                                   steps_per_epoch=steps_per_epoch)
    return model.fit_generator(generator, nb_epoch=epochs,
                               samples_per_epoch=steps_per_epoch)
|
||
|
||
def fit(model, x, y, nb_epoch=10, *args, **kwargs):
    """Version-agnostic ``model.fit``: Keras 2 renamed ``nb_epoch`` to ``epochs``.

    Returns the Keras ``History`` object produced by ``model.fit``.
    """
    if not keras_2:
        # Keras 1 still expects the old keyword name.
        return model.fit(x, y, *args, nb_epoch=nb_epoch, **kwargs)
    return model.fit(x, y, *args, epochs=nb_epoch, **kwargs)
|
||
|
||
def l1l2(l1=0, l2=0):
    """Build a combined L1/L2 weight regularizer for either Keras version.

    Keras 2 exposes the class ``L1L2`` where Keras 1 had the factory ``l1l2``.
    """
    factory = keras.regularizers.L1L2 if keras_2 else keras.regularizers.l1l2
    return factory(l1, l2)
|
||
|
||
def Dense(units, W_regularizer=None, W_initializer='glorot_uniform', **kwargs):
    """Create a ``Dense`` layer, translating Keras-1 argument names when needed.

    Keras 2 renamed ``W_regularizer``/``init`` to
    ``kernel_regularizer``/``kernel_initializer``.
    """
    if not keras_2:
        return keras.layers.Dense(units, W_regularizer=W_regularizer,
                                  init=W_initializer, **kwargs)
    return keras.layers.Dense(units,
                              kernel_regularizer=W_regularizer,
                              kernel_initializer=W_initializer,
                              **kwargs)
|
||
|
||
def BatchNormalization(mode=0, **kwargs):
    """Create a ``BatchNormalization`` layer for either Keras version.

    Keras 2 dropped the ``mode`` argument entirely, so it is forwarded only
    under Keras 1 and silently ignored otherwise.
    """
    if not keras_2:
        return keras.layers.BatchNormalization(mode=mode, **kwargs)
    return keras.layers.BatchNormalization(**kwargs)
|
||
|
||
def Convolution2D(units, w, h, W_regularizer=None, W_initializer='glorot_uniform', border_mode='same', **kwargs):
    """Create a 2-D convolution layer, papering over Keras 1/2 API changes.

    Keras 2 takes the kernel size as a single ``(w, h)`` tuple and renamed
    ``border_mode``/``W_regularizer``/``init`` to
    ``padding``/``kernel_regularizer``/``kernel_initializer``.
    """
    if not keras_2:
        return keras.layers.Convolution2D(units, w, h,
                                          border_mode=border_mode,
                                          W_regularizer=W_regularizer,
                                          init=W_initializer,
                                          **kwargs)
    return keras.layers.Convolution2D(units, (w, h),
                                      padding=border_mode,
                                      kernel_regularizer=W_regularizer,
                                      kernel_initializer=W_initializer,
                                      **kwargs)
|
||
|
||
def AveragePooling2D(pool_size, border_mode='valid', **kwargs):
    """Create an ``AveragePooling2D`` layer for either Keras version.

    Keras 2 renamed the ``border_mode`` argument to ``padding``.
    """
    if not keras_2:
        return keras.layers.AveragePooling2D(pool_size=pool_size,
                                             border_mode=border_mode,
                                             **kwargs)
    return keras.layers.AveragePooling2D(pool_size=pool_size,
                                         padding=border_mode,
                                         **kwargs)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,61 @@ | ||
from __future__ import print_function | ||
import numpy as np | ||
from keras.datasets import mnist | ||
from keras.models import Sequential | ||
from keras.layers.core import Dense, Activation | ||
from keras.optimizers import SGD | ||
from keras.utils import np_utils | ||
|
||
# Reproducibility: fix the NumPy RNG seed before any weight initialization.
np.random.seed(1671)

# Network and training hyper-parameters.
NB_EPOCH = 200
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10          # number of outputs = number of digits
OPTIMIZER = SGD()        # SGD optimizer, explained later in this chapter
N_HIDDEN = 128
VALIDATION_SPLIT = 0.2   # fraction of TRAIN reserved for VALIDATION

# Data: shuffled and split between train and test sets.
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# X_train arrives as N images of 28x28 values --> flatten each to 784.
RESHAPED = 784

# Derive the sample counts from the arrays instead of hard-coding
# 60000/10000, so the script keeps working if the split ever changes.
X_train = X_train.reshape(X_train.shape[0], RESHAPED).astype('float32')
X_test = X_test.reshape(X_test.shape[0], RESHAPED).astype('float32')

# Normalize pixel intensities from [0, 255] to [0, 1].
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Convert class vectors to one-hot binary class matrices.
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)

# Single dense layer: 10 outputs, final stage is softmax.
model = Sequential()
model.add(Dense(NB_CLASSES, input_shape=(RESHAPED,)))
model.add(Activation('softmax'))

model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=OPTIMIZER,
              metrics=['accuracy'])

history = model.fit(X_train, Y_train,
                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
print("\nTest score:", score[0])
print('Test accuracy:', score[1])
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,64 @@ | ||
from __future__ import print_function | ||
import numpy as np | ||
from keras.datasets import mnist | ||
from keras.models import Sequential | ||
from keras.layers.core import Dense, Activation | ||
from keras.optimizers import SGD | ||
from keras.utils import np_utils | ||
|
||
# Reproducibility: fix the NumPy RNG seed before any weight initialization.
np.random.seed(1671)

# Network and training hyper-parameters.
NB_EPOCH = 20
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10          # number of outputs = number of digits
OPTIMIZER = SGD()        # optimizer, explained later in this chapter
N_HIDDEN = 128
VALIDATION_SPLIT = 0.2   # fraction of TRAIN reserved for VALIDATION

# Data: shuffled and split between train and test sets.
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# X_train arrives as N images of 28x28 values --> flatten each to 784.
RESHAPED = 784

# Derive the sample counts from the arrays instead of hard-coding
# 60000/10000, so the script keeps working if the split ever changes.
X_train = X_train.reshape(X_train.shape[0], RESHAPED).astype('float32')
X_test = X_test.reshape(X_test.shape[0], RESHAPED).astype('float32')

# Normalize pixel intensities from [0, 255] to [0, 1].
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Convert class vectors to one-hot binary class matrices.
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)

# Two N_HIDDEN-unit ReLU hidden layers, 10 outputs, final stage softmax.
model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dense(N_HIDDEN))
model.add(Activation('relu'))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=OPTIMIZER,
              metrics=['accuracy'])

history = model.fit(X_train, Y_train,
                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)

score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
print("\nTest score:", score[0])
print('Test accuracy:', score[1])
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,88 @@ | ||
from __future__ import print_function | ||
import numpy as np | ||
from keras.datasets import mnist | ||
from keras.models import Sequential | ||
from keras.layers.core import Dense, Dropout, Activation | ||
from keras.optimizers import SGD | ||
from keras.utils import np_utils | ||
|
||
import matplotlib.pyplot as plt | ||
|
||
# Reproducibility: fix the NumPy RNG seed before any weight initialization.
np.random.seed(1671)

# Network and training hyper-parameters.
NB_EPOCH = 250
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10          # number of outputs = number of digits
OPTIMIZER = SGD()        # optimizer, explained later in this chapter
N_HIDDEN = 128
VALIDATION_SPLIT = 0.2   # fraction of TRAIN reserved for VALIDATION
DROPOUT = 0.3

# Data: shuffled and split between train and test sets.
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# X_train arrives as N images of 28x28 values --> flatten each to 784.
RESHAPED = 784

# Derive the sample counts from the arrays instead of hard-coding
# 60000/10000, so the script keeps working if the split ever changes.
X_train = X_train.reshape(X_train.shape[0], RESHAPED).astype('float32')
X_test = X_test.reshape(X_test.shape[0], RESHAPED).astype('float32')

# Normalize pixel intensities from [0, 255] to [0, 1].
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Convert class vectors to one-hot binary class matrices.
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)

# Two N_HIDDEN-unit ReLU hidden layers with dropout, 10 outputs, softmax.
model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(N_HIDDEN))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=OPTIMIZER,
              metrics=['accuracy'])

history = model.fit(X_train, Y_train,
                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)

score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
print("\nTest score:", score[0])
print('Test accuracy:', score[1])

# List all metrics recorded during training.
print(history.history.keys())
# Summarize history for accuracy ('acc'/'val_acc' are the Keras-era key names).
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Summarize history for loss.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,88 @@ | ||
from __future__ import print_function | ||
import numpy as np | ||
from keras.datasets import mnist | ||
from keras.models import Sequential | ||
from keras.layers.core import Dense, Dropout, Activation | ||
from keras.optimizers import RMSprop | ||
from keras.utils import np_utils | ||
|
||
import matplotlib.pyplot as plt | ||
|
||
# Reproducibility: fix the NumPy RNG seed before any weight initialization.
np.random.seed(1671)

# Network and training hyper-parameters.
NB_EPOCH = 20
BATCH_SIZE = 128
VERBOSE = 1
NB_CLASSES = 10          # number of outputs = number of digits
OPTIMIZER = RMSprop()    # optimizer, explained in this chapter
N_HIDDEN = 128
VALIDATION_SPLIT = 0.2   # fraction of TRAIN reserved for VALIDATION
DROPOUT = 0.3

# Data: shuffled and split between train and test sets.
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# X_train arrives as N images of 28x28 values --> flatten each to 784.
RESHAPED = 784

# Derive the sample counts from the arrays instead of hard-coding
# 60000/10000, so the script keeps working if the split ever changes.
X_train = X_train.reshape(X_train.shape[0], RESHAPED).astype('float32')
X_test = X_test.reshape(X_test.shape[0], RESHAPED).astype('float32')

# Normalize pixel intensities from [0, 255] to [0, 1].
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Convert class vectors to one-hot binary class matrices.
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)

# Two N_HIDDEN-unit ReLU hidden layers with dropout, 10 outputs, softmax.
model = Sequential()
model.add(Dense(N_HIDDEN, input_shape=(RESHAPED,)))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(N_HIDDEN))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=OPTIMIZER,
              metrics=['accuracy'])

history = model.fit(X_train, Y_train,
                    batch_size=BATCH_SIZE, epochs=NB_EPOCH,
                    verbose=VERBOSE, validation_split=VALIDATION_SPLIT)

score = model.evaluate(X_test, Y_test, verbose=VERBOSE)
print("\nTest score:", score[0])
print('Test accuracy:', score[1])

# List all metrics recorded during training.
print(history.history.keys())
# Summarize history for accuracy ('acc'/'val_acc' are the Keras-era key names).
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Summarize history for loss.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
{"class_name": "Sequential", "keras_version": "1.1.1", "config": [{"class_name": "Convolution2D", "config": {"b_regularizer": null, "W_constraint": null, "b_constraint": null, "name": "convolution2d_1", "activity_regularizer": null, "trainable": true, "dim_ordering": "th", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 32, "input_dtype": "float32", "border_mode": "same", "batch_input_shape": [null, 3, 32, 32], "W_regularizer": null, "activation": "linear", "nb_row": 3}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_1"}}, {"class_name": "Convolution2D", "config": {"W_constraint": null, "b_constraint": null, "name": "convolution2d_2", "activity_regularizer": null, "trainable": true, "dim_ordering": "th", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 32, "border_mode": "same", "b_regularizer": null, "W_regularizer": null, "activation": "linear", "nb_row": 3}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_2"}}, {"class_name": "MaxPooling2D", "config": {"name": "maxpooling2d_1", "trainable": true, "dim_ordering": "th", "pool_size": [2, 2], "strides": [2, 2], "border_mode": "valid"}}, {"class_name": "Dropout", "config": {"p": 0.25, "trainable": true, "name": "dropout_1"}}, {"class_name": "Convolution2D", "config": {"W_constraint": null, "b_constraint": null, "name": "convolution2d_3", "activity_regularizer": null, "trainable": true, "dim_ordering": "th", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 64, "border_mode": "same", "b_regularizer": null, "W_regularizer": null, "activation": "linear", "nb_row": 3}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_3"}}, {"class_name": "Convolution2D", "config": {"W_constraint": null, "b_constraint": null, "name": "convolution2d_4", 
"activity_regularizer": null, "trainable": true, "dim_ordering": "th", "nb_col": 3, "subsample": [1, 1], "init": "glorot_uniform", "bias": true, "nb_filter": 64, "border_mode": "valid", "b_regularizer": null, "W_regularizer": null, "activation": "linear", "nb_row": 3}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_4"}}, {"class_name": "MaxPooling2D", "config": {"name": "maxpooling2d_2", "trainable": true, "dim_ordering": "th", "pool_size": [2, 2], "strides": [2, 2], "border_mode": "valid"}}, {"class_name": "Dropout", "config": {"p": 0.25, "trainable": true, "name": "dropout_2"}}, {"class_name": "Flatten", "config": {"trainable": true, "name": "flatten_1"}}, {"class_name": "Dense", "config": {"W_constraint": null, "b_constraint": null, "name": "dense_1", "activity_regularizer": null, "trainable": true, "init": "glorot_uniform", "bias": true, "input_dim": null, "b_regularizer": null, "W_regularizer": null, "activation": "linear", "output_dim": 512}}, {"class_name": "Activation", "config": {"activation": "relu", "trainable": true, "name": "activation_5"}}, {"class_name": "Dropout", "config": {"p": 0.5, "trainable": true, "name": "dropout_3"}}, {"class_name": "Dense", "config": {"W_constraint": null, "b_constraint": null, "name": "dense_2", "activity_regularizer": null, "trainable": true, "init": "glorot_uniform", "bias": true, "input_dim": null, "b_regularizer": null, "W_regularizer": null, "activation": "linear", "output_dim": 10}}, {"class_name": "Activation", "config": {"activation": "softmax", "trainable": true, "name": "activation_6"}}]} |
Oops, something went wrong.