# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
  save = pickle.load(f)
  train_dataset = save['train_dataset']
  train_labels = save['train_labels']
  valid_dataset = save['valid_dataset']
  valid_labels = save['valid_labels']
  test_dataset = save['test_dataset']
  test_labels = save['test_labels']
  del save  # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
Training set (200000, 28, 28) (200000,)
Validation set (10000, 28, 28) (10000,)
Test set (10000, 28, 28) (10000,)
Reformat into a TensorFlow-friendly shape: convolutions need the image data formatted as a cube (width by height by #channels), and the labels as float one-hot encodings.
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
def reformat(dataset, labels):
  dataset = dataset.reshape(
    (-1, image_size, image_size, num_channels)).astype(np.float32)
  labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
  return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
Training set (200000, 28, 28, 1) (200000, 10)
Validation set (10000, 28, 28, 1) (10000, 10)
Test set (10000, 28, 28, 1) (10000, 10)
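The one-hot conversion in reformat relies on NumPy broadcasting: comparing a row vector of class indices against a column of labels yields a boolean matrix with exactly one True per row. A minimal sketch of the idiom, on toy labels rather than the dataset:

toy_labels = np.array([1, 0, 3])
one_hot = (np.arange(4) == toy_labels[:, None]).astype(np.float32)
print(one_hot)
# [[ 0.  1.  0.  0.]
#  [ 1.  0.  0.  0.]
#  [ 0.  0.  0.  1.]]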
def accuracy(predictions, labels):
  return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
          / predictions.shape[0])
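As a quick check of the helper on a toy batch (purely illustrative): argmax picks the predicted and true class per row, and the match rate is returned as a percentage.

toy_preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
toy_labs = np.array([[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]])
print(accuracy(toy_preds, toy_labs))  # ~66.7: two of three rows match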
Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are computationally more expensive, so we'll limit the depth and the number of fully connected nodes.
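A quick sanity check on the spatial dimensions first: with 'SAME' padding, each stride-2 convolution halves the spatial size (rounding up), so two of them take 28x28 down to 7x7. That is why the first fully connected layer below expects image_size // 4 * image_size // 4 * depth inputs. A throwaway sketch, with the hyperparameter values inlined:

import math
size = 28                         # image_size
for _ in range(2):                # two stride-2 layers with 'SAME' padding
  size = int(math.ceil(size / 2.0))
print(size, size * size * 16)     # 7 784, i.e. 7 * 7 * depth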
batch_size = 16
patch_size = 5
depth = 16
num_hidden = 64
graph = tf.Graph()
with graph.as_default():
  # Input data.
  tf_train_dataset = tf.placeholder(
    tf.float32, shape=(batch_size, image_size, image_size, num_channels))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Variables.
  layer1_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, num_channels, depth], stddev=0.1))
  layer1_biases = tf.Variable(tf.zeros([depth]))
  layer2_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, depth, depth], stddev=0.1))
  layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
  layer3_weights = tf.Variable(tf.truncated_normal(
    [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))
  layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
  layer4_weights = tf.Variable(tf.truncated_normal(
    [num_hidden, num_labels], stddev=0.1))
  layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

  # Model.
  def model(data):
    conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(conv + layer1_biases)
    conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(conv + layer2_biases)
    shape = hidden.get_shape().as_list()
    reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
    hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
    return tf.matmul(hidden, layer4_weights) + layer4_biases

  # Training computation.
  logits = model(tf_train_dataset)
  loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
  test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 1001
with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print('Initialized')
  for step in range(num_steps):
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
    _, l, predictions = session.run(
      [optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 100 == 0):
      print('Minibatch loss at step %d: %f' % (step, l))
      print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
      print('Validation accuracy: %.1f%%' % accuracy(
        valid_prediction.eval(), valid_labels))
  print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
Initialized
Minibatch loss at step 0: 3.139105
Minibatch accuracy: 12.5%
Validation accuracy: 11.9%
Minibatch loss at step 100: 1.115680
Minibatch accuracy: 62.5%
Validation accuracy: 67.1%
Minibatch loss at step 200: 0.356532
Minibatch accuracy: 87.5%
Validation accuracy: 78.4%
Minibatch loss at step 300: 0.310974
Minibatch accuracy: 93.8%
Validation accuracy: 78.7%
Minibatch loss at step 400: 0.602386
Minibatch accuracy: 68.8%
Validation accuracy: 79.5%
Minibatch loss at step 500: 0.441770
Minibatch accuracy: 87.5%
Validation accuracy: 81.2%
Minibatch loss at step 600: 0.840172
Minibatch accuracy: 75.0%
Validation accuracy: 82.3%
Minibatch loss at step 700: 1.235861
Minibatch accuracy: 68.8%
Validation accuracy: 82.9%
Minibatch loss at step 800: 0.885891
Minibatch accuracy: 81.2%
Validation accuracy: 82.5%
Minibatch loss at step 900: 0.300222
Minibatch accuracy: 87.5%
Validation accuracy: 82.6%
Minibatch loss at step 1000: 0.506481
Minibatch accuracy: 87.5%
Validation accuracy: 84.0%
Test accuracy: 90.1%
The convolutional model above uses convolutions with stride 2 to reduce the dimensionality. Replace the strides with a max pooling operation (tf.nn.max_pool()) of stride 2 and kernel size 2. Since a 2x2 pool with stride 2 also halves the spatial dimensions, the fully connected layer shapes are unchanged.
batch_size = 16
patch_size = 5
depth = 16
num_hidden = 64
graph = tf.Graph()
with graph.as_default():
  # Input data.
  tf_train_dataset = tf.placeholder(
    tf.float32, shape=(batch_size, image_size, image_size, num_channels))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Variables.
  layer1_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, num_channels, depth], stddev=0.1))
  layer1_biases = tf.Variable(tf.zeros([depth]))
  layer2_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, depth, depth], stddev=0.1))
  layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
  layer3_weights = tf.Variable(tf.truncated_normal(
    [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))
  layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
  layer4_weights = tf.Variable(tf.truncated_normal(
    [num_hidden, num_labels], stddev=0.1))
  layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

  # Model.
  def model(data):
    conv = tf.nn.conv2d(data, layer1_weights, [1, 1, 1, 1], padding='SAME')
    pool = tf.nn.max_pool(conv, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(pool + layer1_biases)
    conv = tf.nn.conv2d(hidden, layer2_weights, [1, 1, 1, 1], padding='SAME')
    pool = tf.nn.max_pool(conv, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(pool + layer2_biases)
    shape = hidden.get_shape().as_list()
    reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
    hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
    return tf.matmul(hidden, layer4_weights) + layer4_biases

  # Training computation.
  logits = model(tf_train_dataset)
  loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
  test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 1001
with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print('Initialized')
  for step in range(num_steps):
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
    _, l, predictions = session.run(
      [optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 100 == 0):
      print('Minibatch loss at step %d: %f' % (step, l))
      print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
      print('Validation accuracy: %.1f%%' % accuracy(
        valid_prediction.eval(), valid_labels))
  print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
Initialized
Minibatch loss at step 0: 2.883915
Minibatch accuracy: 12.5%
Validation accuracy: 10.0%
Minibatch loss at step 100: 1.051122
Minibatch accuracy: 56.2%
Validation accuracy: 59.1%
Minibatch loss at step 200: 0.415176
Minibatch accuracy: 87.5%
Validation accuracy: 76.4%
Minibatch loss at step 300: 0.316238
Minibatch accuracy: 87.5%
Validation accuracy: 81.7%
Minibatch loss at step 400: 0.553747
Minibatch accuracy: 75.0%
Validation accuracy: 80.0%
Minibatch loss at step 500: 0.250361
Minibatch accuracy: 93.8%
Validation accuracy: 81.5%
Minibatch loss at step 600: 0.627140
Minibatch accuracy: 68.8%
Validation accuracy: 83.7%
Minibatch loss at step 700: 1.068274
Minibatch accuracy: 75.0%
Validation accuracy: 84.3%
Minibatch loss at step 800: 0.719155
Minibatch accuracy: 75.0%
Validation accuracy: 83.8%
Minibatch loss at step 900: 0.629290
Minibatch accuracy: 81.2%
Validation accuracy: 84.5%
Minibatch loss at step 1000: 0.510888
Minibatch accuracy: 81.2%
Validation accuracy: 84.2%
Test accuracy: 89.8%
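Finally, let's try to improve on this with dropout as a regularizer. The network below keeps the max-pooling architecture but adds dropout after the fully connected layer, controlled by a keep_prob placeholder: the training step feeds keep_prob = 0.5, while the periodic evaluations feed keep_prob = 1.0 to disable dropout. It also trains for 3001 steps rather than 1001.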
batch_size = 16
patch_size = 5
depth = 16
num_hidden = 64
graph = tf.Graph()
with graph.as_default():
  # Input data.
  tf_train_dataset = tf.placeholder(
    tf.float32, shape=(batch_size, image_size, image_size, num_channels))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Placeholder to control the dropout keep probability.
  keep_prob = tf.placeholder(tf.float32)

  # Variables.
  layer1_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, num_channels, depth], stddev=0.1))
  layer1_biases = tf.Variable(tf.zeros([depth]))
  layer2_weights = tf.Variable(tf.truncated_normal(
    [patch_size, patch_size, depth, depth], stddev=0.1))
  layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
  layer3_weights = tf.Variable(tf.truncated_normal(
    [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))
  layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
  layer4_weights = tf.Variable(tf.truncated_normal(
    [num_hidden, num_labels], stddev=0.1))
  layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

  # Model.
  def model(data):
    conv = tf.nn.conv2d(data, layer1_weights, [1, 1, 1, 1], padding='SAME')
    pool = tf.nn.max_pool(conv, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(pool + layer1_biases)
    conv = tf.nn.conv2d(hidden, layer2_weights, [1, 1, 1, 1], padding='SAME')
    pool = tf.nn.max_pool(conv, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    hidden = tf.nn.relu(pool + layer2_biases)
    shape = hidden.get_shape().as_list()
    reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
    hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
    drop = tf.nn.dropout(hidden, keep_prob)
    return tf.matmul(drop, layer4_weights) + layer4_biases

  # Training computation.
  logits = model(tf_train_dataset)
  loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
  test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 3001
with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print('Initialized')
  for step in range(num_steps):
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    # Two feed dicts: dropout enabled (keep_prob 0.5) for the training step,
    # disabled (keep_prob 1.0) for the periodic evaluations below.
    feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels,
                 keep_prob: 1.0}
    feed_dict_w_drop = {tf_train_dataset: batch_data, tf_train_labels: batch_labels,
                        keep_prob: 0.5}
    _, l, predictions = session.run(
      [optimizer, loss, train_prediction], feed_dict=feed_dict_w_drop)
    if (step % 100 == 0):
      print('Minibatch loss at step %d: %f' % (step, l))
      print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
      print('Validation accuracy: %.1f%%' % accuracy(
        valid_prediction.eval(feed_dict=feed_dict), valid_labels))
  print('Test accuracy: %.1f%%' % accuracy(
    test_prediction.eval(feed_dict=feed_dict), test_labels))
Initialized
Minibatch loss at step 0: 4.529957
Minibatch accuracy: 18.8%
Validation accuracy: 12.3%
Minibatch loss at step 100: 1.826838
Minibatch accuracy: 50.0%
Validation accuracy: 46.1%
Minibatch loss at step 200: 1.026006
Minibatch accuracy: 62.5%
Validation accuracy: 69.2%
Minibatch loss at step 300: 0.719998
Minibatch accuracy: 87.5%
Validation accuracy: 76.9%
Minibatch loss at step 400: 1.125187
Minibatch accuracy: 62.5%
Validation accuracy: 78.8%
Minibatch loss at step 500: 0.659142
Minibatch accuracy: 68.8%
Validation accuracy: 80.9%
Minibatch loss at step 600: 1.144517
Minibatch accuracy: 62.5%
Validation accuracy: 81.3%
Minibatch loss at step 700: 1.284646
Minibatch accuracy: 62.5%
Validation accuracy: 81.7%
Minibatch loss at step 800: 1.318694
Minibatch accuracy: 68.8%
Validation accuracy: 82.3%
Minibatch loss at step 900: 0.596128
Minibatch accuracy: 81.2%
Validation accuracy: 83.2%
Minibatch loss at step 1000: 0.593340
Minibatch accuracy: 81.2%
Validation accuracy: 83.5%
Minibatch loss at step 1100: 1.036340
Minibatch accuracy: 81.2%
Validation accuracy: 83.8%
Minibatch loss at step 1200: 0.578478
Minibatch accuracy: 81.2%
Validation accuracy: 84.5%
Minibatch loss at step 1300: 1.571593
Minibatch accuracy: 43.8%
Validation accuracy: 83.8%
Minibatch loss at step 1400: 0.659016
Minibatch accuracy: 75.0%
Validation accuracy: 84.5%
Minibatch loss at step 1500: 0.455753
Minibatch accuracy: 93.8%
Validation accuracy: 84.9%
Minibatch loss at step 1600: 0.458268
Minibatch accuracy: 81.2%
Validation accuracy: 84.5%
Minibatch loss at step 1700: 0.856795
Minibatch accuracy: 81.2%
Validation accuracy: 84.9%
Minibatch loss at step 1800: 0.423227
Minibatch accuracy: 81.2%
Validation accuracy: 85.0%
Minibatch loss at step 1900: 0.750963
Minibatch accuracy: 87.5%
Validation accuracy: 84.8%
Minibatch loss at step 2000: 0.760005
Minibatch accuracy: 75.0%
Validation accuracy: 85.4%
Minibatch loss at step 2100: 0.779080
Minibatch accuracy: 75.0%
Validation accuracy: 85.5%
Minibatch loss at step 2200: 0.371877
Minibatch accuracy: 93.8%
Validation accuracy: 85.4%
Minibatch loss at step 2300: 0.551173
Minibatch accuracy: 87.5%
Validation accuracy: 85.1%
Minibatch loss at step 2400: 0.251618
Minibatch accuracy: 93.8%
Validation accuracy: 86.0%
Minibatch loss at step 2500: 1.167190
Minibatch accuracy: 68.8%
Validation accuracy: 84.7%
Minibatch loss at step 2600: 1.088484
Minibatch accuracy: 56.2%
Validation accuracy: 85.0%
Minibatch loss at step 2700: 0.663480
Minibatch accuracy: 81.2%
Validation accuracy: 85.2%
Minibatch loss at step 2800: 0.869098
Minibatch accuracy: 68.8%
Validation accuracy: 86.3%
Minibatch loss at step 2900: 0.405881
Minibatch accuracy: 81.2%
Validation accuracy: 86.4%
Minibatch loss at step 3000: 0.435097
Minibatch accuracy: 87.5%
Validation accuracy: 86.0%
Test accuracy: 92.1%
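One detail worth noting: tf.nn.dropout implements "inverted" dropout, scaling the kept activations by 1 / keep_prob at training time, so the evaluation passes with keep_prob = 1.0 need no extra rescaling. A minimal NumPy sketch of the idea (dropout_sketch is illustrative only, not part of the model):

def dropout_sketch(x, keep_prob):
  # Zero units with probability (1 - keep_prob), then rescale the survivors
  # so the expected value of the output equals the input.
  mask = (np.random.rand(*x.shape) < keep_prob).astype(x.dtype)
  return x * mask / keep_prob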