Convolutional Neural Network for MNIST classification with Keras
from keras.models import Sequential
from keras.layers import Dense, Flatten, Convolution2D, MaxPooling2D, Dropout
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
from keras import initializations
from keras import backend as K
Using TensorFlow backend.
batch_size = 128              # samples per gradient update
nb_classes = 10               # digit classes 0-9
nb_epoch = 100                # training epochs
img_rows, img_cols = 28, 28   # MNIST image dimensions
pool_size = (2, 2)            # max-pooling window
prob_drop_conv = 0.2          # dropout rate after each conv/pool block
prob_drop_hidden = 0.5        # dropout rate after the fully connected layer
# Gaussian weight initialization with standard deviation 0.01 (Keras 1.x initializations API)
def init_weights(shape, name=None):
    return initializations.normal(shape, scale=0.01, name=name)
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print('X_train original shape:', X_train.shape)

# Add the channel axis: Theano ('th') dim ordering expects channels first,
# TensorFlow ordering expects channels last.
if K.image_dim_ordering() == 'th':
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
    X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
X_train original shape: (60000, 28, 28)
X_train = X_train.astype('float32') / 255.   # scale pixel values to [0, 1]
X_test = X_test.astype('float32') / 255.
Y_train = np_utils.to_categorical(y_train, nb_classes)   # one-hot encode the labels
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
X_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
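As a quick sanity check on the preprocessing (a small verification sketch, not part of the original notebook), the labels are now one-hot vectors of length nb_classes:
print('Y_train shape:', Y_train.shape)   # expected: (60000, 10)
print(y_train[0], '->', Y_train[0])      # e.g. 5 -> [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]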
model = Sequential()

# Conv block 1: 32 3x3 filters; 'same' padding keeps 28x28, pooling halves it to 14x14
model.add(Convolution2D(32, 3, 3, border_mode='same', activation='relu', input_shape=input_shape, init=init_weights))
model.add(MaxPooling2D(pool_size=pool_size, strides=(2,2), border_mode='same'))
model.add(Dropout(prob_drop_conv))

# Conv block 2: 64 filters, 14x14 -> 7x7
model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu', init=init_weights))
model.add(MaxPooling2D(pool_size=pool_size, strides=(2,2), border_mode='same'))
model.add(Dropout(prob_drop_conv))

# Conv block 3: 128 filters, 7x7 -> 4x4, then flatten to a 2048-dimensional vector
model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu', init=init_weights))
model.add(MaxPooling2D(pool_size=pool_size, strides=(2,2), border_mode='same'))
model.add(Flatten())
model.add(Dropout(prob_drop_conv))

# Classifier: one hidden layer of 625 units, then a 10-way softmax
model.add(Dense(625, activation='relu', init=init_weights))
model.add(Dropout(prob_drop_hidden))
model.add(Dense(10, activation='softmax', init=init_weights))
opt = RMSprop(lr=0.001, rho=0.9)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
convolution2d_1 (Convolution2D) (None, 28, 28, 32) 320 convolution2d_input_1[0][0]
____________________________________________________________________________________________________
maxpooling2d_1 (MaxPooling2D) (None, 14, 14, 32) 0 convolution2d_1[0][0]
____________________________________________________________________________________________________
dropout_1 (Dropout) (None, 14, 14, 32) 0 maxpooling2d_1[0][0]
____________________________________________________________________________________________________
convolution2d_2 (Convolution2D) (None, 14, 14, 64) 18496 dropout_1[0][0]
____________________________________________________________________________________________________
maxpooling2d_2 (MaxPooling2D) (None, 7, 7, 64) 0 convolution2d_2[0][0]
____________________________________________________________________________________________________
dropout_2 (Dropout) (None, 7, 7, 64) 0 maxpooling2d_2[0][0]
____________________________________________________________________________________________________
convolution2d_3 (Convolution2D) (None, 7, 7, 128) 73856 dropout_2[0][0]
____________________________________________________________________________________________________
maxpooling2d_3 (MaxPooling2D) (None, 4, 4, 128) 0 convolution2d_3[0][0]
____________________________________________________________________________________________________
flatten_1 (Flatten) (None, 2048) 0 maxpooling2d_3[0][0]
____________________________________________________________________________________________________
dropout_3 (Dropout) (None, 2048) 0 flatten_1[0][0]
____________________________________________________________________________________________________
dense_1 (Dense) (None, 625) 1280625 dropout_3[0][0]
____________________________________________________________________________________________________
dropout_4 (Dropout) (None, 625) 0 dense_1[0][0]
____________________________________________________________________________________________________
dense_2 (Dense) (None, 10) 6260 dropout_4[0][0]
====================================================================================================
Total params: 1379557
____________________________________________________________________________________________________
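The output shapes in the summary follow from the 'same'-padded pooling: each MaxPooling2D with stride 2 produces ceil(input/2) along each spatial axis, so the feature maps shrink 28 -> 14 -> 7 -> 4, and Flatten yields 4*4*128 = 2048 features. A small arithmetic check (not from the original notebook):
import math
size = 28
for _ in range(3):                        # three 'same'-padded poolings with stride 2
    size = math.ceil(size / 2)            # 28 -> 14 -> 7 -> 4
print(size * size * 128)                  # 2048, the Flatten output width
print(size * size * 128 * 625 + 625)      # 1280625, params of the 625-unit Dense layer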
history = model.fit(X_train, Y_train, nb_epoch=nb_epoch, batch_size=batch_size, shuffle=True, verbose=1)
Epoch 1/100
60000/60000 [==============================] - 6s - loss: 0.4027 - acc: 0.8651
Epoch 2/100
60000/60000 [==============================] - 5s - loss: 0.1045 - acc: 0.9677
Epoch 3/100
60000/60000 [==============================] - 5s - loss: 0.0732 - acc: 0.9773
Epoch 4/100
60000/60000 [==============================] - 5s - loss: 0.0572 - acc: 0.9819
Epoch 5/100
60000/60000 [==============================] - 5s - loss: 0.0493 - acc: 0.9848
Epoch 6/100
60000/60000 [==============================] - 5s - loss: 0.0444 - acc: 0.9867
Epoch 7/100
60000/60000 [==============================] - 5s - loss: 0.0407 - acc: 0.9878
Epoch 8/100
60000/60000 [==============================] - 5s - loss: 0.0379 - acc: 0.9882
Epoch 9/100
60000/60000 [==============================] - 5s - loss: 0.0354 - acc: 0.9897
Epoch 10/100
60000/60000 [==============================] - 5s - loss: 0.0341 - acc: 0.9897
Epoch 11/100
60000/60000 [==============================] - 5s - loss: 0.0310 - acc: 0.9910
Epoch 12/100
60000/60000 [==============================] - 5s - loss: 0.0305 - acc: 0.9907
Epoch 13/100
60000/60000 [==============================] - 5s - loss: 0.0309 - acc: 0.9908
Epoch 14/100
60000/60000 [==============================] - 5s - loss: 0.0299 - acc: 0.9913
Epoch 15/100
60000/60000 [==============================] - 5s - loss: 0.0294 - acc: 0.9913
Epoch 16/100
60000/60000 [==============================] - 5s - loss: 0.0299 - acc: 0.9918
Epoch 17/100
60000/60000 [==============================] - 5s - loss: 0.0298 - acc: 0.9913
Epoch 18/100
60000/60000 [==============================] - 5s - loss: 0.0286 - acc: 0.9922
Epoch 19/100
60000/60000 [==============================] - 5s - loss: 0.0268 - acc: 0.9920
Epoch 20/100
60000/60000 [==============================] - 5s - loss: 0.0295 - acc: 0.9916
Epoch 21/100
60000/60000 [==============================] - 5s - loss: 0.0296 - acc: 0.9917
Epoch 22/100
60000/60000 [==============================] - 5s - loss: 0.0281 - acc: 0.9920
Epoch 23/100
60000/60000 [==============================] - 5s - loss: 0.0294 - acc: 0.9920
Epoch 24/100
60000/60000 [==============================] - 5s - loss: 0.0303 - acc: 0.9915
Epoch 25/100
60000/60000 [==============================] - 5s - loss: 0.0319 - acc: 0.9912
Epoch 26/100
60000/60000 [==============================] - 5s - loss: 0.0310 - acc: 0.9916
Epoch 27/100
60000/60000 [==============================] - 5s - loss: 0.0300 - acc: 0.9913
Epoch 28/100
60000/60000 [==============================] - 5s - loss: 0.0316 - acc: 0.9911
Epoch 29/100
60000/60000 [==============================] - 5s - loss: 0.0325 - acc: 0.9913
Epoch 30/100
60000/60000 [==============================] - 5s - loss: 0.0329 - acc: 0.9918
Epoch 31/100
60000/60000 [==============================] - 5s - loss: 0.0326 - acc: 0.9910
Epoch 32/100
60000/60000 [==============================] - 5s - loss: 0.0352 - acc: 0.9905
Epoch 33/100
60000/60000 [==============================] - 5s - loss: 0.0348 - acc: 0.9904
Epoch 34/100
60000/60000 [==============================] - 5s - loss: 0.0344 - acc: 0.9909
Epoch 35/100
60000/60000 [==============================] - 5s - loss: 0.0380 - acc: 0.9906
Epoch 36/100
60000/60000 [==============================] - 5s - loss: 0.0338 - acc: 0.9909
Epoch 37/100
60000/60000 [==============================] - 5s - loss: 0.0343 - acc: 0.9911
Epoch 38/100
60000/60000 [==============================] - 5s - loss: 0.0343 - acc: 0.9905
Epoch 39/100
60000/60000 [==============================] - 5s - loss: 0.0355 - acc: 0.9905
Epoch 40/100
60000/60000 [==============================] - 5s - loss: 0.0351 - acc: 0.9906
Epoch 41/100
60000/60000 [==============================] - 5s - loss: 0.0382 - acc: 0.9903
Epoch 42/100
60000/60000 [==============================] - 5s - loss: 0.0388 - acc: 0.9898
Epoch 43/100
60000/60000 [==============================] - 5s - loss: 0.0380 - acc: 0.9896
Epoch 44/100
60000/60000 [==============================] - 5s - loss: 0.0387 - acc: 0.9898
Epoch 45/100
60000/60000 [==============================] - 5s - loss: 0.0445 - acc: 0.9892
Epoch 46/100
60000/60000 [==============================] - 5s - loss: 0.0405 - acc: 0.9899
Epoch 47/100
60000/60000 [==============================] - 5s - loss: 0.0409 - acc: 0.9897
Epoch 48/100
60000/60000 [==============================] - 5s - loss: 0.0403 - acc: 0.9897
Epoch 49/100
60000/60000 [==============================] - 5s - loss: 0.0390 - acc: 0.9903
Epoch 50/100
60000/60000 [==============================] - 5s - loss: 0.0443 - acc: 0.9893
Epoch 51/100
60000/60000 [==============================] - 5s - loss: 0.0414 - acc: 0.9897
Epoch 52/100
60000/60000 [==============================] - 5s - loss: 0.0437 - acc: 0.9890
Epoch 53/100
60000/60000 [==============================] - 5s - loss: 0.0467 - acc: 0.9893
Epoch 54/100
60000/60000 [==============================] - 5s - loss: 0.0455 - acc: 0.9881
Epoch 55/100
60000/60000 [==============================] - 5s - loss: 0.0441 - acc: 0.9892
Epoch 56/100
60000/60000 [==============================] - 5s - loss: 0.0469 - acc: 0.9885
Epoch 57/100
60000/60000 [==============================] - 5s - loss: 0.0474 - acc: 0.9886
Epoch 58/100
60000/60000 [==============================] - 5s - loss: 0.0484 - acc: 0.9880
Epoch 59/100
60000/60000 [==============================] - 5s - loss: 0.0505 - acc: 0.9878
Epoch 60/100
60000/60000 [==============================] - 5s - loss: 0.0470 - acc: 0.9888
Epoch 61/100
60000/60000 [==============================] - 5s - loss: 0.0478 - acc: 0.9882
Epoch 62/100
60000/60000 [==============================] - 5s - loss: 0.0512 - acc: 0.9881
Epoch 63/100
60000/60000 [==============================] - 5s - loss: 0.0562 - acc: 0.9876
Epoch 64/100
60000/60000 [==============================] - 5s - loss: 0.0496 - acc: 0.9879
Epoch 65/100
60000/60000 [==============================] - 5s - loss: 0.0504 - acc: 0.9879
Epoch 66/100
60000/60000 [==============================] - 5s - loss: 0.0508 - acc: 0.9875
Epoch 67/100
60000/60000 [==============================] - 5s - loss: 0.0548 - acc: 0.9872
Epoch 68/100
60000/60000 [==============================] - 5s - loss: 0.0532 - acc: 0.9871
Epoch 69/100
60000/60000 [==============================] - 5s - loss: 0.0508 - acc: 0.9879
Epoch 70/100
60000/60000 [==============================] - 5s - loss: 0.0533 - acc: 0.9870
Epoch 71/100
60000/60000 [==============================] - 5s - loss: 0.0524 - acc: 0.9873
Epoch 72/100
60000/60000 [==============================] - 5s - loss: 0.0549 - acc: 0.9874
Epoch 73/100
60000/60000 [==============================] - 5s - loss: 0.0558 - acc: 0.9861
Epoch 74/100
60000/60000 [==============================] - 5s - loss: 0.0531 - acc: 0.9864
Epoch 75/100
60000/60000 [==============================] - 5s - loss: 0.0539 - acc: 0.9864
Epoch 76/100
60000/60000 [==============================] - 5s - loss: 0.0574 - acc: 0.9862
Epoch 77/100
60000/60000 [==============================] - 5s - loss: 0.0574 - acc: 0.9862
Epoch 78/100
60000/60000 [==============================] - 5s - loss: 0.0597 - acc: 0.9856
Epoch 79/100
60000/60000 [==============================] - 5s - loss: 0.0582 - acc: 0.9856
Epoch 80/100
60000/60000 [==============================] - 5s - loss: 0.0592 - acc: 0.9854
Epoch 81/100
60000/60000 [==============================] - 5s - loss: 0.0571 - acc: 0.9865
Epoch 82/100
60000/60000 [==============================] - 5s - loss: 0.0690 - acc: 0.9847
Epoch 83/100
60000/60000 [==============================] - 5s - loss: 0.0598 - acc: 0.9856
Epoch 84/100
60000/60000 [==============================] - 5s - loss: 0.0606 - acc: 0.9855
Epoch 85/100
60000/60000 [==============================] - 5s - loss: 0.0621 - acc: 0.9849
Epoch 86/100
60000/60000 [==============================] - 5s - loss: 0.0673 - acc: 0.9844
Epoch 87/100
60000/60000 [==============================] - 5s - loss: 0.0599 - acc: 0.9850
Epoch 88/100
60000/60000 [==============================] - 5s - loss: 0.0571 - acc: 0.9858
Epoch 89/100
60000/60000 [==============================] - 5s - loss: 0.0651 - acc: 0.9841
Epoch 90/100
60000/60000 [==============================] - 5s - loss: 0.0661 - acc: 0.9838
Epoch 91/100
60000/60000 [==============================] - 5s - loss: 0.0673 - acc: 0.9833
Epoch 92/100
60000/60000 [==============================] - 5s - loss: 0.0663 - acc: 0.9838
Epoch 93/100
60000/60000 [==============================] - 5s - loss: 0.0736 - acc: 0.9827
Epoch 94/100
60000/60000 [==============================] - 5s - loss: 0.0647 - acc: 0.9840
Epoch 95/100
60000/60000 [==============================] - 5s - loss: 0.0660 - acc: 0.9837
Epoch 96/100
60000/60000 [==============================] - 5s - loss: 0.0646 - acc: 0.9837
Epoch 97/100
60000/60000 [==============================] - 5s - loss: 0.0635 - acc: 0.9844
Epoch 98/100
60000/60000 [==============================] - 5s - loss: 0.0638 - acc: 0.9841
Epoch 99/100
60000/60000 [==============================] - 5s - loss: 0.0685 - acc: 0.9832
Epoch 100/100
60000/60000 [==============================] - 5s - loss: 0.0664 - acc: 0.9838
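The training loss bottoms out around epoch 19 (0.0268) and then drifts upward, so most of the 100 epochs add little. A hedged sketch of how the same fit call could hold out validation data and stop early, using standard Keras callbacks (the patience value is an arbitrary choice; this was not part of the original run):
from keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=5)   # patience chosen for illustration
history = model.fit(X_train, Y_train, nb_epoch=nb_epoch, batch_size=batch_size,
                    shuffle=True, verbose=1,
                    validation_split=0.1, callbacks=[early_stop])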
evaluation = model.evaluate(X_test, Y_test, batch_size=256, verbose=1)
print('Summary: Loss over the test dataset: %.2f, Accuracy: %.2f' % (evaluation[0], evaluation[1]))
 9984/10000 [============================>.] - ETA: 0s
Summary: Loss over the test dataset: 0.03, Accuracy: 0.99
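The code above uses the Keras 1.x API (Convolution2D(32, 3, 3), border_mode, init, np_utils, nb_epoch). For reference, a minimal sketch of the same architecture in the Keras 2 / tf.keras API; this is an untested translation, not part of the original run:
from tensorflow.keras import layers, models, initializers, optimizers

init = initializers.RandomNormal(stddev=0.01)   # replaces the Keras 1.x init_weights helper
model2 = models.Sequential([
    layers.Conv2D(32, (3, 3), padding='same', activation='relu',
                  kernel_initializer=init, input_shape=(28, 28, 1)),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
    layers.Dropout(0.2),
    layers.Conv2D(64, (3, 3), padding='same', activation='relu', kernel_initializer=init),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
    layers.Dropout(0.2),
    layers.Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer=init),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
    layers.Flatten(),
    layers.Dropout(0.2),
    layers.Dense(625, activation='relu', kernel_initializer=init),
    layers.Dropout(0.5),
    layers.Dense(10, activation='softmax', kernel_initializer=init),
])
model2.compile(optimizer=optimizers.RMSprop(learning_rate=0.001, rho=0.9),
               loss='categorical_crossentropy', metrics=['accuracy'])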