# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
First, reload the data we generated in notMNIST.ipynb.
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
  save = pickle.load(f)
  train_dataset = save['train_dataset']
  train_labels = save['train_labels']
  valid_dataset = save['valid_dataset']
  valid_labels = save['valid_labels']
  test_dataset = save['test_dataset']
  test_labels = save['test_labels']
  del save  # hint to help gc free up memory
  print('Training set', train_dataset.shape, train_labels.shape)
  print('Validation set', valid_dataset.shape, valid_labels.shape)
  print('Test set', test_dataset.shape, test_labels.shape)
Training set (200000, 28, 28) (200000,)
Validation set (10000, 28, 28) (10000,)
Test set (10000, 28, 28) (10000,)
Reformat into a shape that's more adapted to the models we're going to train:
image_size = 28
num_labels = 10
def reformat(dataset, labels):
  dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
  # Map 1 to [0.0, 1.0, 0.0 ...], 2 to [0.0, 0.0, 1.0 ...]
  labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
  return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
Training set (200000, 784) (200000, 10)
Validation set (10000, 784) (10000, 10)
Test set (10000, 784) (10000, 10)
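The label conversion above relies on NumPy broadcasting: comparing a row of class indices (np.arange(num_labels)) against a column of labels yields a boolean matrix with exactly one True per row. A tiny illustration with toy labels (illustrative values, not from the dataset):
toy_labels = np.array([1, 2])
print((np.arange(4) == toy_labels[:, None]).astype(np.float32))
# [[ 0.  1.  0.  0.]
#  [ 0.  0.  1.  0.]]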
def accuracy(predictions, labels):
  # Percentage of samples whose predicted class (argmax of the softmax output)
  # matches the true class (argmax of the one-hot label).
  return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
          / predictions.shape[0])
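A quick sanity check with toy one-hot matrices (illustrative values, not from the dataset):
print(accuracy(np.eye(3), np.eye(3)))        # 100.0: all argmaxes agree
print(accuracy(np.eye(3), np.eye(3)[::-1]))  # 33.3: only the middle row matches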
Introduce and tune L2 regularization for both logistic and neural network models. Remember that L2 regularization amounts to adding a penalty on the norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor t using tf.nn.l2_loss(t). The right amount of regularization should improve your validation / test accuracy.
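Note that tf.nn.l2_loss(t) computes sum(t ** 2) / 2, i.e. half the squared L2 norm, without the square root. A minimal sanity check (toy tensor, run in its own throwaway session):
t = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
with tf.Session() as check_session:
  print(check_session.run(tf.nn.l2_loss(tf.constant(t))))  # 15.0
print(np.sum(t ** 2) / 2)                                  # 15.0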
Let's start with logistic regression. The only change from lab 2 is the regularizer. Note that I didn't tune the hyperparameter (beta), so the test accuracy is actually slightly worse in this case.
batch_size = 128
learning_rate = 0.5
beta = 0.05
graph = tf.Graph()
with graph.as_default():
  # Input data. For the training data, we use a placeholder that will be fed
  # at run time with a training minibatch.
  tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(batch_size, image_size * image_size))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Variables.
  weights = tf.Variable(
      tf.truncated_normal([image_size * image_size, num_labels]))
  biases = tf.Variable(tf.zeros([num_labels]))

  # Training computation.
  logits = tf.matmul(tf_train_dataset, weights) + biases
  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
  # Add the regularization term to the loss.
  loss += beta * tf.nn.l2_loss(weights)

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(
      tf.matmul(tf_valid_dataset, weights) + biases)
  test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
This part is exactly the same as in lab 2.
num_steps = 3001
with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print("Initialized")
  for step in range(num_steps):
    # Pick an offset within the training data, which has been randomized.
    # Note: we could use better randomization across epochs.
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    # Generate a minibatch.
    batch_data = train_dataset[offset:(offset + batch_size), :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    # Prepare a dictionary telling the session where to feed the minibatch.
    # The key of the dictionary is the placeholder node of the graph to be fed,
    # and the value is the numpy array to feed to it.
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
    _, l, predictions = session.run(
        [optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 500 == 0):
      print("Minibatch loss at step %d: %f" % (step, l))
      print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
      print("Validation accuracy: %.1f%%" % accuracy(
          valid_prediction.eval(), valid_labels))
  print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
Initialized
Minibatch loss at step 0: 171.807022
Minibatch accuracy: 6.2%
Validation accuracy: 9.8%
Minibatch loss at step 500: 0.944785
Minibatch accuracy: 78.1%
Validation accuracy: 78.8%
Minibatch loss at step 1000: 0.881135
Minibatch accuracy: 82.8%
Validation accuracy: 79.2%
Minibatch loss at step 1500: 0.883581
Minibatch accuracy: 84.4%
Validation accuracy: 79.9%
Minibatch loss at step 2000: 0.909441
Minibatch accuracy: 77.3%
Validation accuracy: 79.2%
Minibatch loss at step 2500: 0.906707
Minibatch accuracy: 85.2%
Validation accuracy: 79.9%
Minibatch loss at step 3000: 1.037380
Minibatch accuracy: 78.1%
Validation accuracy: 79.0%
Test accuracy: 85.4%
Now let's try the same thing for the neural network model.
batch_size = 128
hidden_nodes = 1024
learning_rate = 0.5
beta = 0.005
graph = tf.Graph()
with graph.as_default():
  # Input data. For the training data, we use a placeholder that will be fed
  # at run time with a training minibatch.
  tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(batch_size, image_size * image_size))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)

  # Variables.
  weights_1 = tf.Variable(
      tf.truncated_normal([image_size * image_size, hidden_nodes]))
  biases_1 = tf.Variable(tf.zeros([hidden_nodes]))
  weights_2 = tf.Variable(
      tf.truncated_normal([hidden_nodes, num_labels]))
  biases_2 = tf.Variable(tf.zeros([num_labels]))

  # Training computation.
  def forward_prop(input):
    h1 = tf.nn.relu(tf.matmul(input, weights_1) + biases_1)
    return tf.matmul(h1, weights_2) + biases_2

  logits = forward_prop(tf_train_dataset)
  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
  # Add the regularization term to the loss.
  loss += beta * (tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2))

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(forward_prop(tf_valid_dataset))
  test_prediction = tf.nn.softmax(forward_prop(tf_test_dataset))
num_steps = 3001
with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print("Initialized")
  for step in range(num_steps):
    # Pick an offset within the training data, which has been randomized.
    # Note: we could use better randomization across epochs.
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    # Generate a minibatch.
    batch_data = train_dataset[offset:(offset + batch_size), :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    # Prepare a dictionary telling the session where to feed the minibatch.
    # The key of the dictionary is the placeholder node of the graph to be fed,
    # and the value is the numpy array to feed to it.
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
    _, l, predictions = session.run(
        [optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 500 == 0):
      print("Minibatch loss at step %d: %f" % (step, l))
      print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
      print("Validation accuracy: %.1f%%" % accuracy(
          valid_prediction.eval(), valid_labels))
  print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
Initialized
Minibatch loss at step 0: 1939.758911
Minibatch accuracy: 10.9%
Validation accuracy: 28.2%
Minibatch loss at step 500: 126.945297
Minibatch accuracy: 82.8%
Validation accuracy: 82.9%
Minibatch loss at step 1000: 10.868656
Minibatch accuracy: 88.3%
Validation accuracy: 85.1%
Minibatch loss at step 1500: 1.389078
Minibatch accuracy: 89.8%
Validation accuracy: 85.4%
Minibatch loss at step 2000: 0.681626
Minibatch accuracy: 88.3%
Validation accuracy: 85.6%
Minibatch loss at step 2500: 0.648225
Minibatch accuracy: 85.2%
Validation accuracy: 85.6%
Minibatch loss at step 3000: 0.788015
Minibatch accuracy: 78.9%
Validation accuracy: 85.4%
Test accuracy: 90.8%
Let's demonstrate an extreme case of overfitting. Restrict your training data to just a few batches. What happens?
train_dataset_restricted = train_dataset[:3000, :]
train_labels_restricted = train_labels[:3000, :]
num_steps = 3001
with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print("Initialized")
  for step in range(num_steps):
    # Pick an offset within the restricted training data.
    offset = (step * batch_size) % (train_labels_restricted.shape[0] - batch_size)
    # Generate a minibatch.
    batch_data = train_dataset_restricted[offset:(offset + batch_size), :]
    batch_labels = train_labels_restricted[offset:(offset + batch_size), :]
    # Prepare a dictionary telling the session where to feed the minibatch.
    # The key of the dictionary is the placeholder node of the graph to be fed,
    # and the value is the numpy array to feed to it.
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
    _, l, predictions = session.run(
        [optimizer, loss, train_prediction], feed_dict=feed_dict)
    if (step % 500 == 0):
      print("Minibatch loss at step %d: %f" % (step, l))
      print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
      print("Validation accuracy: %.1f%%" % accuracy(
          valid_prediction.eval(), valid_labels))
  print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
Initialized
Minibatch loss at step 0: 1843.877930
Minibatch accuracy: 9.4%
Validation accuracy: 22.7%
Minibatch loss at step 500: 128.225174
Minibatch accuracy: 99.2%
Validation accuracy: 79.3%
Minibatch loss at step 1000: 10.765879
Minibatch accuracy: 100.0%
Validation accuracy: 82.5%
Minibatch loss at step 1500: 1.253613
Minibatch accuracy: 99.2%
Validation accuracy: 82.2%
Minibatch loss at step 2000: 0.430536
Minibatch accuracy: 100.0%
Validation accuracy: 82.8%
Minibatch loss at step 2500: 0.433309
Minibatch accuracy: 97.7%
Validation accuracy: 83.0%
Minibatch loss at step 3000: 0.365732
Minibatch accuracy: 99.2%
Validation accuracy: 82.7%
Test accuracy: 88.6%
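With only 3000 training examples, the network simply memorizes them: minibatch accuracy quickly reaches 99-100%, while validation accuracy plateaus around 83%, well below the 85.6% we reached with the full training set. That gap between training and validation accuracy is the signature of overfitting.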
Introduce Dropout on the hidden layer of the neural network. Remember: Dropout should only be introduced during training, not evaluation; otherwise your evaluation results would be stochastic as well. TensorFlow provides tf.nn.dropout() for that, but you have to make sure it's only inserted during training.
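Before wiring it in, here is a minimal demonstration of what tf.nn.dropout does (toy input, run in its own throwaway session): each unit is zeroed out with probability 1 - keep_prob, and the survivors are scaled up by 1 / keep_prob so the expected activation is unchanged, which is why no extra correction is needed at evaluation time.
demo = tf.nn.dropout(tf.ones([1, 10]), 0.5)
with tf.Session() as demo_session:
  print(demo_session.run(demo))
# e.g. [[ 2.  0.  2.  2.  0.  0.  2.  2.  0.  2.]] -- survivors scaled to 2.0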
What happens to our extreme overfitting case?
batch_size = 128
hidden_nodes = 1024
learning_rate = 0.5
beta = 0.005
graph = tf.Graph()
with graph.as_default():
  # Input data. For the training data, we use a placeholder that will be fed
  # at run time with a training minibatch.
  tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(batch_size, image_size * image_size))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)
  # Placeholder to control dropout probability.
  keep_prob = tf.placeholder(tf.float32)

  # Variables.
  weights_1 = tf.Variable(
      tf.truncated_normal([image_size * image_size, hidden_nodes]))
  biases_1 = tf.Variable(tf.zeros([hidden_nodes]))
  weights_2 = tf.Variable(
      tf.truncated_normal([hidden_nodes, num_labels]))
  biases_2 = tf.Variable(tf.zeros([num_labels]))

  # Training computation.
  def forward_prop(input):
    h1 = tf.nn.relu(tf.matmul(input, weights_1) + biases_1)
    # Add dropout to the hidden layer.
    drop = tf.nn.dropout(h1, keep_prob)
    return tf.matmul(drop, weights_2) + biases_2

  logits = forward_prop(tf_train_dataset)
  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
  # Add the regularization term to the loss.
  loss += beta * (tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2))

  # Optimizer.
  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(forward_prop(tf_valid_dataset))
  test_prediction = tf.nn.softmax(forward_prop(tf_test_dataset))
train_dataset_restricted = train_dataset[:3000, :]
train_labels_restricted = train_labels[:3000, :]
num_steps = 3001
with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print("Initialized")
  for step in range(num_steps):
    # Pick an offset within the restricted training data, so we reproduce
    # the extreme overfitting setup from above.
    offset = (step * batch_size) % (train_labels_restricted.shape[0] - batch_size)
    # Generate a minibatch.
    batch_data = train_dataset_restricted[offset:(offset + batch_size), :]
    batch_labels = train_labels_restricted[offset:(offset + batch_size), :]
    # Prepare a dictionary telling the session where to feed the minibatch.
    # Dropout is only active in the training feed (keep_prob = 0.5);
    # evaluation uses keep_prob = 1.0 so the results are deterministic.
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, keep_prob: 1.0}
    feed_dict_w_drop = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, keep_prob: 0.5}
    _, l, predictions = session.run(
        [optimizer, loss, train_prediction], feed_dict=feed_dict_w_drop)
    if (step % 500 == 0):
      print("Minibatch loss at step %d: %f" % (step, l))
      print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
      print("Validation accuracy: %.1f%%" % accuracy(
          valid_prediction.eval(feed_dict=feed_dict), valid_labels))
  print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(feed_dict=feed_dict), test_labels))
Initialized
Minibatch loss at step 0: 2118.435547
Minibatch accuracy: 14.1%
Validation accuracy: 36.0%
Minibatch loss at step 500: 128.232056
Minibatch accuracy: 78.1%
Validation accuracy: 82.4%
Minibatch loss at step 1000: 10.991631
Minibatch accuracy: 86.7%
Validation accuracy: 85.0%
Minibatch loss at step 1500: 1.463545
Minibatch accuracy: 86.7%
Validation accuracy: 85.1%
Minibatch loss at step 2000: 0.784623
Minibatch accuracy: 86.7%
Validation accuracy: 85.2%
Minibatch loss at step 2500: 0.701598
Minibatch accuracy: 82.8%
Validation accuracy: 85.1%
Minibatch loss at step 3000: 0.880962
Minibatch accuracy: 78.9%
Validation accuracy: 84.8%
Test accuracy: 90.5%
Try to get the best performance you can using a multi-layer model! The best reported test accuracy using a deep network is 97.1%.
One avenue you can explore is to add multiple layers.
Another one is to use learning rate decay:
global_step = tf.Variable(0) # count the number of steps taken.
learning_rate = tf.train.exponential_decay(0.5, global_step, ...)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
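For reference, exponential_decay computes learning_rate = 0.5 * decay_rate ** (global_step / decay_steps), so the elided arguments are decay_steps and decay_rate. The code below uses decay_steps=1000 and decay_rate=0.9, i.e. the learning rate shrinks by a factor of 0.9 every 1000 steps.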
Note that I added a 2nd hidden layer in the code but then commented it out because it made the performance worse, and my VM is too slow to spend time tuning hyperparameters. You get the idea!
batch_size = 128
hidden_nodes = 1024
hidden_nodes_2 = 500
beta = 0.005
graph = tf.Graph()
with graph.as_default():
  # Input data. For the training data, we use a placeholder that will be fed
  # at run time with a training minibatch.
  tf_train_dataset = tf.placeholder(tf.float32,
                                    shape=(batch_size, image_size * image_size))
  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
  tf_valid_dataset = tf.constant(valid_dataset)
  tf_test_dataset = tf.constant(test_dataset)
  # Placeholder to control dropout probability.
  keep_prob = tf.placeholder(tf.float32)

  # Variables.
  weights_1 = tf.Variable(tf.truncated_normal([image_size * image_size, hidden_nodes]))
  biases_1 = tf.Variable(tf.zeros([hidden_nodes]))
  weights_2 = tf.Variable(tf.truncated_normal([hidden_nodes, num_labels]))
  biases_2 = tf.Variable(tf.zeros([num_labels]))
  # This is what the weights look like with an additional hidden layer.
  # weights_2 = tf.Variable(tf.truncated_normal([hidden_nodes, hidden_nodes_2]))
  # biases_2 = tf.Variable(tf.zeros([hidden_nodes_2]))
  # weights_3 = tf.Variable(tf.truncated_normal([hidden_nodes_2, num_labels]))
  # biases_3 = tf.Variable(tf.zeros([num_labels]))
  # Training computation.
  # Two-hidden-layer version (note that each dropout output feeds the next layer):
  # def forward_prop(input):
  #   h1 = tf.nn.relu(tf.matmul(input, weights_1) + biases_1)
  #   drop_1 = tf.nn.dropout(h1, keep_prob)
  #   h2 = tf.nn.relu(tf.matmul(drop_1, weights_2) + biases_2)
  #   drop_2 = tf.nn.dropout(h2, keep_prob)
  #   return tf.matmul(drop_2, weights_3) + biases_3
  def forward_prop(input):
    h1 = tf.nn.relu(tf.matmul(input, weights_1) + biases_1)
    drop = tf.nn.dropout(h1, keep_prob)
    return tf.matmul(drop, weights_2) + biases_2
  logits = forward_prop(tf_train_dataset)
  loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
  # Add the regularization term to the loss.
  # loss += beta * (tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2) + tf.nn.l2_loss(weights_3))
  loss += beta * (tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2))

  # Optimizer w/ learning rate decay.
  global_step = tf.Variable(0)
  learning_rate = tf.train.exponential_decay(0.5, global_step, 1000, 0.9)
  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
      loss, global_step=global_step)

  # Predictions for the training, validation, and test data.
  train_prediction = tf.nn.softmax(logits)
  valid_prediction = tf.nn.softmax(forward_prop(tf_valid_dataset))
  test_prediction = tf.nn.softmax(forward_prop(tf_test_dataset))
num_steps = 3001
with tf.Session(graph=graph) as session:
  tf.initialize_all_variables().run()
  print("Initialized")
  for step in range(num_steps):
    # Pick an offset within the training data, which has been randomized.
    # Note: we could use better randomization across epochs.
    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
    # Generate a minibatch.
    batch_data = train_dataset[offset:(offset + batch_size), :]
    batch_labels = train_labels[offset:(offset + batch_size), :]
    # Prepare a dictionary telling the session where to feed the minibatch.
    # Dropout is only active in the training feed (keep_prob = 0.5);
    # evaluation uses keep_prob = 1.0 so the results are deterministic.
    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, keep_prob: 1.0}
    feed_dict_w_drop = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, keep_prob: 0.5}
    _, l, predictions = session.run(
        [optimizer, loss, train_prediction], feed_dict=feed_dict_w_drop)
    if (step % 500 == 0):
      print("Minibatch loss at step %d: %f" % (step, l))
      print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
      print("Validation accuracy: %.1f%%" % accuracy(
          valid_prediction.eval(feed_dict=feed_dict), valid_labels))
  print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(feed_dict=feed_dict), test_labels))
Initialized
Minibatch loss at step 0: 2106.596436
Minibatch accuracy: 6.2%
Validation accuracy: 25.3%
Minibatch loss at step 500: 137.231522
Minibatch accuracy: 77.3%
Validation accuracy: 82.6%
Minibatch loss at step 1000: 13.910622
Minibatch accuracy: 87.5%
Validation accuracy: 84.7%
Minibatch loss at step 1500: 2.107945
Minibatch accuracy: 85.2%
Validation accuracy: 85.3%
Minibatch loss at step 2000: 0.921251
Minibatch accuracy: 83.6%
Validation accuracy: 84.8%
Minibatch loss at step 2500: 0.718915
Minibatch accuracy: 84.4%
Validation accuracy: 85.2%
Minibatch loss at step 3000: 0.906768
Minibatch accuracy: 75.8%
Validation accuracy: 85.0%
Test accuracy: 90.8%