Note

This notebook can be downloaded here: 3_ML_Tutorial_CNN.ipynb

7 - Convolutional Neural Networks

LeNet-5 Convolutional Network architecture

The Convolution Operation
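The figure above illustrates how a small kernel slides over the image. As a minimal NumPy sketch of the "valid" convolution (strictly speaking, the cross-correlation that deep-learning frameworks actually compute), assuming a single-channel image and stride 1:

import numpy as np

def conv2d_valid(image, kernel):
    """Slide `kernel` over `image` (no padding, stride 1) and
    sum the element-wise products at each position."""
    h, w = kernel.shape
    out_h = image.shape[0] - h + 1
    out_w = image.shape[1] - w + 1
    out = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            out[i, j] = np.sum(image[i:i + h, j:j + w] * kernel)
    return out

# A 3x3 kernel applied to a 28x28 image yields a 26x26 feature map,
# matching the Conv2D output shapes in the model summary below
edge_kernel = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
feature_map = conv2d_valid(np.random.rand(28, 28), edge_kernel)
print(feature_map.shape)  # (26, 26)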

# Load the dataset
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
Using TensorFlow backend.
# Scale pixel values from [0, 255] to [0, 1]
x_train_normalized = x_train / 255.0
x_test_normalized = x_test / 255.0  # The test set must be scaled the same way as the training set
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# %matplotlib nbagg
# %matplotlib ipympl
# %matplotlib notebook
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
import numpy as np
# Sample a smaller training subset for faster experimentation
rand_idx = np.random.choice(x_train.shape[0], 10000, replace=False)  # Without replacement, to avoid duplicate samples
x_train = x_train_normalized[rand_idx]
y_train = y_train[rand_idx]
x_train.shape
(10000, 28, 28)
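Before building the model it is worth eyeballing a few samples; a quick sketch using the arrays defined above:

# Display the first few digits of the subsampled training set with their labels
fig, axes = plt.subplots(1, 5, figsize=(10, 2))
for i, ax in enumerate(axes):
    ax.imshow(x_train[i], cmap=plt.cm.gray, interpolation='nearest')
    ax.set_title('label: {}'.format(y_train[i]))
    ax.axis('off')
plt.show()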
# import tensorflow as tf
import keras
from keras.layers import AveragePooling2D, Conv2D, Dense, Flatten
cnn_model = keras.Sequential()

# First Convolutional Layer: 6 filters of size 3x3, followed by 2x2 average pooling
# (these layers reproduce the model summary shown below)
cnn_model.add(Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
cnn_model.add(AveragePooling2D(pool_size=(2, 2)))

# Second Convolutional Layer: 16 filters of size 3x3, followed by 2x2 average pooling
cnn_model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
cnn_model.add(AveragePooling2D(pool_size=(2, 2)))

# First Fully Connected Layer
cnn_model.add(Flatten())
cnn_model.add(Dense(units=100, activation='relu'))

# Second Fully Connected Layer
cnn_model.add(Dense(units=80, activation='relu'))

# Output Layer: one probability per digit class
cnn_model.add(Dense(units=10, activation='softmax'))

# Compile the model as a TensorFlow graph
cnn_model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
cnn_model.summary()
SVG(model_to_dot(cnn_model, show_shapes=True).create(prog='dot', format='svg'))
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_3 (Conv2D)            (None, 26, 26, 6)         60
_________________________________________________________________
average_pooling2d_3 (Average (None, 13, 13, 6)         0
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 11, 11, 16)        880
_________________________________________________________________
average_pooling2d_4 (Average (None, 5, 5, 16)          0
_________________________________________________________________
flatten_2 (Flatten)          (None, 400)               0
_________________________________________________________________
dense_4 (Dense)              (None, 100)               40100
_________________________________________________________________
dense_5 (Dense)              (None, 80)                8080
_________________________________________________________________
dense_6 (Dense)              (None, 10)                810
=================================================================
Total params: 49,930
Trainable params: 49,930
Non-trainable params: 0
_________________________________________________________________
[Figure: model graph rendered as SVG by model_to_dot]
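The parameter counts in the summary can be checked by hand: a Conv2D layer with f filters of size k×k over c input channels has f × (k·k·c + 1) parameters (the +1 is the bias). A quick sanity check:

# conv2d_3: 6 filters, 3x3 kernel, 1 input channel
print(6 * (3 * 3 * 1 + 1))    # 60
# conv2d_4: 16 filters, 3x3 kernel, 6 input channels
print(16 * (3 * 3 * 6 + 1))   # 880
# dense_4: 400 inputs (5*5*16 flattened) to 100 units, plus 100 biases
print(400 * 100 + 100)        # 40100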
# Using Tensorboard
from time import time
from keras.callbacks import TensorBoard

def tb(logdir="logs", port=6006, open_tab=True, sleep=2):
    """Launch a local TensorBoard server and optionally open it in a browser tab."""
    import subprocess
    import time as _time  # Aliased to avoid shadowing the `time` imported above
    import webbrowser
    proc = subprocess.Popen(
        "tensorboard --logdir={0} --port={1}".format(logdir, port), shell=True)
    if open_tab:
        _time.sleep(sleep)  # Give the server a moment to start before opening the tab
        webbrowser.open("http://127.0.0.1:{}/".format(port))
    return proc
tb()
<subprocess.Popen at 0x142e89d68>
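Keeping the returned Popen handle makes it easy to shut the server down once you are done; a minimal sketch (the tb_proc name is just for illustration):

tb_proc = tb()       # Start TensorBoard and open a browser tab
# ... inspect the dashboards ...
tb_proc.terminate()  # Stop the TensorBoard server process when done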
tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
history = cnn_model.fit(x_train.reshape((-1, 28, 28, 1)), y_train, validation_split=0.2, epochs=50, shuffle=True, callbacks=[tensorboard], verbose=1)
WARNING:tensorflow:From /Users/Pierre/.virtualenvs/DeepQC/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3064: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Train on 8000 samples, validate on 2000 samples
Epoch 1/50
8000/8000 [==============================] - 2s 268us/step - loss: 0.7755 - acc: 0.7702 - val_loss: 0.3852 - val_acc: 0.8785
Epoch 2/50
8000/8000 [==============================] - 2s 287us/step - loss: 0.2852 - acc: 0.9109 - val_loss: 0.2388 - val_acc: 0.9315
Epoch 3/50
8000/8000 [==============================] - 3s 327us/step - loss: 0.1899 - acc: 0.9406 - val_loss: 0.2014 - val_acc: 0.9400
Epoch 4/50
8000/8000 [==============================] - 2s 310us/step - loss: 0.1361 - acc: 0.9584 - val_loss: 0.1707 - val_acc: 0.9480
Epoch 5/50
8000/8000 [==============================] - 2s 290us/step - loss: 0.1051 - acc: 0.9675 - val_loss: 0.1333 - val_acc: 0.9620
Epoch 6/50
8000/8000 [==============================] - 3s 347us/step - loss: 0.0848 - acc: 0.9730 - val_loss: 0.1205 - val_acc: 0.9660
Epoch 7/50
8000/8000 [==============================] - 3s 382us/step - loss: 0.0731 - acc: 0.9769 - val_loss: 0.1096 - val_acc: 0.9685
Epoch 8/50
8000/8000 [==============================] - 2s 302us/step - loss: 0.0583 - acc: 0.9816 - val_loss: 0.0976 - val_acc: 0.9730
Epoch 9/50
8000/8000 [==============================] - 2s 294us/step - loss: 0.0508 - acc: 0.9831 - val_loss: 0.1113 - val_acc: 0.9660
Epoch 10/50
8000/8000 [==============================] - 3s 425us/step - loss: 0.0423 - acc: 0.9874 - val_loss: 0.0955 - val_acc: 0.9710
Epoch 11/50
8000/8000 [==============================] - 2s 269us/step - loss: 0.0325 - acc: 0.9906 - val_loss: 0.1188 - val_acc: 0.9650
Epoch 12/50
8000/8000 [==============================] - 2s 274us/step - loss: 0.0346 - acc: 0.9889 - val_loss: 0.1332 - val_acc: 0.9665
Epoch 13/50
8000/8000 [==============================] - 2s 311us/step - loss: 0.0365 - acc: 0.9878 - val_loss: 0.0916 - val_acc: 0.9750
Epoch 14/50
8000/8000 [==============================] - 3s 431us/step - loss: 0.0237 - acc: 0.9923 - val_loss: 0.0913 - val_acc: 0.9745
Epoch 15/50
8000/8000 [==============================] - 2s 311us/step - loss: 0.0186 - acc: 0.9941 - val_loss: 0.1227 - val_acc: 0.9700
Epoch 16/50
8000/8000 [==============================] - 2s 310us/step - loss: 0.0166 - acc: 0.9951 - val_loss: 0.1422 - val_acc: 0.9645
Epoch 17/50
8000/8000 [==============================] - 2s 293us/step - loss: 0.0164 - acc: 0.9950 - val_loss: 0.1903 - val_acc: 0.9540
Epoch 18/50
8000/8000 [==============================] - 3s 374us/step - loss: 0.0160 - acc: 0.9950 - val_loss: 0.1123 - val_acc: 0.9735
Epoch 19/50
8000/8000 [==============================] - 2s 265us/step - loss: 0.0158 - acc: 0.9945 - val_loss: 0.0991 - val_acc: 0.9720
Epoch 20/50
8000/8000 [==============================] - 3s 315us/step - loss: 0.0168 - acc: 0.9939 - val_loss: 0.1678 - val_acc: 0.9605
Epoch 21/50
8000/8000 [==============================] - 2s 295us/step - loss: 0.0231 - acc: 0.9925 - val_loss: 0.0892 - val_acc: 0.9790
Epoch 22/50
8000/8000 [==============================] - 3s 386us/step - loss: 0.0071 - acc: 0.9985 - val_loss: 0.0951 - val_acc: 0.9775
Epoch 23/50
8000/8000 [==============================] - 2s 310us/step - loss: 0.0039 - acc: 0.9989 - val_loss: 0.1161 - val_acc: 0.9775
Epoch 24/50
8000/8000 [==============================] - 2s 281us/step - loss: 0.0189 - acc: 0.9939 - val_loss: 0.1121 - val_acc: 0.9785
Epoch 25/50
8000/8000 [==============================] - 2s 303us/step - loss: 0.0087 - acc: 0.9967 - val_loss: 0.1234 - val_acc: 0.9715
Epoch 26/50
8000/8000 [==============================] - 3s 340us/step - loss: 0.0084 - acc: 0.9971 - val_loss: 0.1411 - val_acc: 0.9720
Epoch 27/50
8000/8000 [==============================] - 2s 303us/step - loss: 0.0122 - acc: 0.9970 - val_loss: 0.1372 - val_acc: 0.9690
Epoch 28/50
8000/8000 [==============================] - 2s 293us/step - loss: 0.0066 - acc: 0.9981 - val_loss: 0.0964 - val_acc: 0.9795
Epoch 29/50
8000/8000 [==============================] - 2s 300us/step - loss: 0.0018 - acc: 0.9995 - val_loss: 0.1028 - val_acc: 0.9815
Epoch 30/50
8000/8000 [==============================] - 3s 407us/step - loss: 3.1767e-04 - acc: 1.0000 - val_loss: 0.1069 - val_acc: 0.9810
Epoch 31/50
8000/8000 [==============================] - 3s 318us/step - loss: 2.2728e-04 - acc: 1.0000 - val_loss: 0.1060 - val_acc: 0.9805
Epoch 32/50
8000/8000 [==============================] - 2s 273us/step - loss: 1.6780e-04 - acc: 1.0000 - val_loss: 0.1074 - val_acc: 0.9815
Epoch 33/50
8000/8000 [==============================] - 3s 327us/step - loss: 1.3414e-04 - acc: 1.0000 - val_loss: 0.1095 - val_acc: 0.9805
Epoch 34/50
8000/8000 [==============================] - 3s 339us/step - loss: 1.1043e-04 - acc: 1.0000 - val_loss: 0.1121 - val_acc: 0.9800
Epoch 35/50
8000/8000 [==============================] - 2s 305us/step - loss: 9.3033e-05 - acc: 1.0000 - val_loss: 0.1138 - val_acc: 0.9805
Epoch 36/50
8000/8000 [==============================] - 2s 297us/step - loss: 7.6398e-05 - acc: 1.0000 - val_loss: 0.1152 - val_acc: 0.9810
Epoch 37/50
8000/8000 [==============================] - 3s 369us/step - loss: 6.4450e-05 - acc: 1.0000 - val_loss: 0.1148 - val_acc: 0.9800
Epoch 38/50
8000/8000 [==============================] - 3s 336us/step - loss: 5.5222e-05 - acc: 1.0000 - val_loss: 0.1174 - val_acc: 0.9805
Epoch 39/50
8000/8000 [==============================] - 2s 285us/step - loss: 4.5990e-05 - acc: 1.0000 - val_loss: 0.1178 - val_acc: 0.9795
Epoch 40/50
8000/8000 [==============================] - 2s 267us/step - loss: 4.1400e-05 - acc: 1.0000 - val_loss: 0.1193 - val_acc: 0.9810
Epoch 41/50
8000/8000 [==============================] - 2s 299us/step - loss: 3.5052e-05 - acc: 1.0000 - val_loss: 0.1209 - val_acc: 0.9795
Epoch 42/50
8000/8000 [==============================] - 3s 348us/step - loss: 3.2721e-05 - acc: 1.0000 - val_loss: 0.1222 - val_acc: 0.9800
Epoch 43/50
8000/8000 [==============================] - 2s 280us/step - loss: 2.9214e-05 - acc: 1.0000 - val_loss: 0.1247 - val_acc: 0.9800
Epoch 44/50
8000/8000 [==============================] - 2s 290us/step - loss: 2.4356e-05 - acc: 1.0000 - val_loss: 0.1244 - val_acc: 0.9800
Epoch 45/50
8000/8000 [==============================] - 3s 365us/step - loss: 2.0444e-05 - acc: 1.0000 - val_loss: 0.1263 - val_acc: 0.9795
Epoch 46/50
8000/8000 [==============================] - 2s 280us/step - loss: 1.8716e-05 - acc: 1.0000 - val_loss: 0.1272 - val_acc: 0.9795
Epoch 47/50
8000/8000 [==============================] - 2s 298us/step - loss: 1.5517e-05 - acc: 1.0000 - val_loss: 0.1280 - val_acc: 0.9795
Epoch 48/50
8000/8000 [==============================] - 2s 266us/step - loss: 1.4030e-05 - acc: 1.0000 - val_loss: 0.1282 - val_acc: 0.9805
Epoch 49/50
8000/8000 [==============================] - 2s 310us/step - loss: 1.2324e-05 - acc: 1.0000 - val_loss: 0.1306 - val_acc: 0.9805
Epoch 50/50
8000/8000 [==============================] - 3s 353us/step - loss: 1.0724e-05 - acc: 1.0000 - val_loss: 0.1336 - val_acc: 0.9795
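Note the classic overfitting pattern in the log above: from epoch 30 onwards the training loss collapses below 1e-4 at 100% training accuracy, while the validation loss drifts back up from roughly 0.09 to 0.13. The early-stopping exercise in the "Going further" section below addresses exactly this.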
# Evaluate on the held-out test set, scaled like the training data
test_loss, test_acc = cnn_model.evaluate(x_test_normalized.reshape((-1, 28, 28, 1)), y_test)
print('Test loss: %0.3f' % test_loss, 'Test accuracy: %0.3f' % test_acc)
10000/10000 [==============================] - 1s 84us/step
Test loss: 0.344 Test accuracy: 0.979
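Accuracy alone does not show which digits get confused with which; a short sketch, assuming scikit-learn is available in the environment:

from sklearn.metrics import confusion_matrix

# Rows: true digit, columns: predicted digit
y_test_pred = cnn_model.predict(x_test_normalized.reshape((-1, 28, 28, 1))).argmax(axis=-1)
print(confusion_matrix(y_test, y_test_pred))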
def plot_training_history(history):
    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.show()
plot_training_history(history)
[Figures: training/validation accuracy and loss curves over the 50 epochs]

Going further...

  • Save and load your trained model locally (a sketch follows this list)
  • Implement early stopping with keras.callbacks.EarlyStopping (also sketched below)
  • Tune hyperparameters, e.g. through keras.optimizers
  • Evaluate more complex/different network architectures (Dropout is a must), e.g. Conv2D(filters=32) -> Conv2D(filters=64) -> Conv2D(filters=128) -> MaxPool(2, 2) -> Dropout(0.25) -> Dense(128) -> Dropout(0.5)
  • Data augmentation
  • Transfer learning from a pre-trained VGG16 network
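A minimal sketch of the first two items, assuming the Keras HDF5 format and a Keras version recent enough (2.2.3+) to support restore_best_weights; the file name digit_cnn.h5 is just an example:

from keras.models import load_model
from keras.callbacks import EarlyStopping

# Save the trained model (architecture, weights and optimizer state) to disk...
cnn_model.save('digit_cnn.h5')
# ...and restore it later without rebuilding or recompiling
restored_model = load_model('digit_cnn.h5')

# Stop training once the validation loss has not improved for 5 epochs,
# rolling back to the best weights seen so far
early_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
history = cnn_model.fit(x_train.reshape((-1, 28, 28, 1)), y_train,
                        validation_split=0.2, epochs=50, shuffle=True,
                        callbacks=[early_stop, tensorboard], verbose=1)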

Resources

8 - Use your digit classifier!

import cv2
import time
# Capture a frame from the webcam
cam = cv2.VideoCapture(0)
print('Camera open:', cam.isOpened())
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
time.sleep(2)  # Give the webcam time to adjust exposure and white balance
isCaptured, frame = cam.read()
print('Frame captured:', isCaptured)
cam.release()  # Release the camera once the frame is captured

# Plot the captured frame
plt.axis('off')
plt.title('Webcam frame')

frame_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
plt.imshow(frame_grey, cmap=plt.cm.gray, interpolation='nearest')

# frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# plt.imshow(frame_rgb, cmap=plt.cm.gray, interpolation='nearest')
Camera open: True
Frame captured: True
<matplotlib.image.AxesImage at 0x128cb7b38>
[Figure: the captured webcam frame in greyscale]
frame_grey.shape
(480, 640)
# Centre-crop the 480x640 frame to a 480x480 square by dropping 80 px on each side
frame_cropped = frame_grey[0:480, 80:560]
print(frame_cropped.shape)
plt.imshow(frame_cropped, cmap=plt.cm.gray, interpolation='nearest')
(480, 480)
<matplotlib.image.AxesImage at 0x128ef9eb8>
[Figure: the square-cropped frame]
# Resize to (28, 28)
frame_28 = cv2.resize(frame_cropped, dsize=(28, 28), interpolation=cv2.INTER_CUBIC)
plt.imshow(frame_28, cmap=plt.cm.gray, interpolation='nearest')
<matplotlib.image.AxesImage at 0x14af10470>
[Figure: the frame resized to 28x28]
# Invert and scale the frame so it matches the MNIST format (white digit on a black background)
frame_28_inv = cv2.bitwise_not(frame_28)
frame_28_norm = frame_28_inv / 255.0
plt.imshow(frame_28_norm, cmap=plt.cm.gray, interpolation='nearest')
<matplotlib.image.AxesImage at 0x14afbcfd0>
[Figure: the inverted, normalized 28x28 frame]
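The crop, resize, invert and scale steps above can be bundled into a single helper for repeated captures; a sketch under the same assumptions (the name preprocess_frame is just for illustration):

def preprocess_frame(frame):
    """Turn a BGR webcam frame into a (1, 28, 28, 1) MNIST-style input."""
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Centre-crop to a square using the smaller dimension
    h, w = grey.shape
    side = min(h, w)
    y0, x0 = (h - side) // 2, (w - side) // 2
    square = grey[y0:y0 + side, x0:x0 + side]
    small = cv2.resize(square, dsize=(28, 28), interpolation=cv2.INTER_CUBIC)
    inverted = cv2.bitwise_not(small)  # MNIST digits are white on a black background
    return (inverted / 255.0).reshape((1, 28, 28, 1))

frame_input = preprocess_frame(frame)  # Equivalent to the manual steps above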
y_predicted_proba = cnn_model.predict(frame_28_norm.reshape((-1, 28, 28, 1)), verbose=1)
y_predicted_proba
1/1 [==============================] - 0s 39ms/step
array([[1.9088064e-05, 2.7585734e-04, 8.0542302e-01, 6.0738707e-03,
        1.2500411e-07, 4.2368796e-05, 4.8234921e-07, 1.9978394e-13,
        1.8816522e-01, 4.1521577e-09]], dtype=float32)
# Take the most probable class for each sample
y_predicted_class = y_predicted_proba.argmax(axis=-1)
y_predicted_class
array([2])
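To close the loop, the predicted class and its probability can be reported together; a short sketch using the arrays above:

digit = int(y_predicted_class[0])
proba = float(y_predicted_proba[0, digit])
print('Predicted digit: {} (probability: {:.1%})'.format(digit, proba))
# With the probabilities shown above this prints: Predicted digit: 2 (probability: 80.5%)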