This IPython notebook is an amalgamation of code available from these two resources:
Install the following packages -
pip install Theano
# Import the required modules
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from sklearn import datasets
from lasagne import layers
from lasagne import init
from lasagne import nonlinearities
from lasagne.updates import sgd,nesterov_momentum
from nolearn.lasagne import NeuralNet
import numpy as np
%pylab inline
Populating the interactive namespace from numpy and matplotlib
# Fetch the MNIST digits and split them into train / test partitions.
# NOTE(review): mldata.org (backing fetch_mldata) has since been retired;
# fetch_openml("mnist_784") is the modern replacement in newer sklearn.
dataset = datasets.fetch_mldata("MNIST Original")
features = (dataset.data / 255.0).astype(np.float32)  # scale pixels to [0, 1]
labels = dataset.target.astype(np.int32)
(trainX, testX, trainY, testY) = train_test_split(features, labels, test_size = 0.33)
# Assemble the classifier: a single-hidden-layer feed-forward network
# (784 inputs -> 100 hidden units -> 10 softmax outputs).
network_layers = [
    ('input', layers.InputLayer),
    ('hidden', layers.DenseLayer),
    ('output', layers.DenseLayer),
]
clf = NeuralNet(
    layers=network_layers,
    input_shape=(None, trainX.shape[1]),
    hidden_num_units=100,
    output_num_units=10,
    output_nonlinearity=nonlinearities.softmax,
    # SGD with Nesterov momentum; plain sgd is the simpler alternative.
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    regression=False,  # classification, not regression
    max_epochs=20,
    verbose=1,
)
# Train the network on the training split; with verbose=1 the per-epoch
# loss/accuracy table below is printed as training progresses.
clf.fit(trainX, trainY)
input (None, 784) produces 784 outputs hidden (None, 100) produces 100 outputs output (None, 10) produces 10 outputs Epoch | Train loss | Valid loss | Train / Val | Valid acc | Dur --------|--------------|--------------|---------------|-------------|------- 1 | 0.584614 | 0.344075 | 1.699087 | 90.41% | 2.2s 2 | 0.304825 | 0.285069 | 1.069301 | 91.85% | 1.8s 3 | 0.257520 | 0.250602 | 1.027604 | 92.88% | 1.8s 4 | 0.225423 | 0.225860 | 0.998066 | 93.61% | 1.8s 5 | 0.200749 | 0.207713 | 0.966475 | 94.25% | 1.8s 6 | 0.181049 | 0.193451 | 0.935888 | 94.59% | 1.8s 7 | 0.164904 | 0.181389 | 0.909117 | 94.96% | 1.8s 8 | 0.151395 | 0.171604 | 0.882234 | 95.22% | 1.7s 9 | 0.139916 | 0.163222 | 0.857209 | 95.41% | 2.9s 10 | 0.129993 | 0.156027 | 0.833147 | 95.65% | 2.4s 11 | 0.121252 | 0.149896 | 0.808904 | 95.76% | 3.7s 12 | 0.113535 | 0.144447 | 0.785995 | 95.91% | 2.3s 13 | 0.106624 | 0.139752 | 0.762946 | 95.99% | 2.2s 14 | 0.100366 | 0.135412 | 0.741190 | 96.11% | 2.3s 15 | 0.094659 | 0.131647 | 0.719040 | 96.26% | 2.2s 16 | 0.089504 | 0.128271 | 0.697774 | 96.33% | 2.3s 17 | 0.084747 | 0.125208 | 0.676852 | 96.43% | 2.4s 18 | 0.080371 | 0.122598 | 0.655569 | 96.46% | 2.3s 19 | 0.076373 | 0.120191 | 0.635432 | 96.48% | 2.2s 20 | 0.072641 | 0.118045 | 0.615364 | 96.55% | 2.2s
/usr/local/lib/python2.7/dist-packages/Lasagne-0.1dev-py2.7.egg/lasagne/init.py:30: UserWarning: The uniform initializer no longer uses Glorot et al.'s approach to determine the bounds, but defaults to the range (-0.01, 0.01) instead. Please use the new GlorotUniform initializer to get the old behavior. GlorotUniform is now the default for all layers. warnings.warn("The uniform initializer no longer uses Glorot et al.'s "
NeuralNet(X_tensor_type=<function matrix at 0x7ff89b31faa0>, batch_iterator_test=<nolearn.lasagne.BatchIterator object at 0x7ff8996918d0>, batch_iterator_train=<nolearn.lasagne.BatchIterator object at 0x7ff899691850>, eval_size=0.2, hidden_num_units=100, input_shape=(None, 784), layers=[('input', <class 'lasagne.layers.input.InputLayer'>), ('hidden', <class 'lasagne.layers.dense.DenseLayer'>), ('output', <class 'lasagne.layers.dense.DenseLayer'>)], loss=None, max_epochs=20, more_params={}, objective=<class 'lasagne.objectives.Objective'>, objective_loss_function=<function categorical_crossentropy at 0x7ff89af2c500>, on_epoch_finished=(), on_training_finished=(), output_nonlinearity=<theano.tensor.nnet.nnet.Softmax object at 0x7ff89af256d0>, output_num_units=10, regression=False, update=<function nesterov_momentum at 0x7ff89968f2a8>, update_learning_rate=0.01, update_momentum=0.9, use_label_encoder=False, verbose=1, y_tensor_type=TensorType(int32, vector))
# Run the trained network on the held-out test split.
preds = clf.predict(testX)
# Summarise per-class precision, recall and F1-score.
# print(...) with a single argument produces identical output under
# Python 2 and Python 3, unlike the bare print statement.
print(classification_report(testY, preds))
precision recall f1-score support 0 0.97 0.98 0.97 2274 1 0.98 0.98 0.98 2600 2 0.97 0.96 0.96 2277 3 0.96 0.96 0.96 2323 4 0.95 0.97 0.96 2233 5 0.97 0.95 0.96 2136 6 0.97 0.98 0.98 2279 7 0.98 0.96 0.97 2409 8 0.95 0.97 0.96 2279 9 0.96 0.94 0.95 2290 avg / total 0.97 0.97 0.97 23100
# Helper to render an image inline in the notebook.
def imshow(im_title, im):
    """Display *im* in the notebook with *im_title* as the figure title.

    2-D arrays are drawn with a gray colormap; 3-D arrays have their
    colour channels swapped via cv2 before display (presumably to bridge
    OpenCV's BGR ordering and matplotlib's RGB — TODO confirm with callers).
    """
    plt.figure()
    plt.title(im_title)
    plt.axis("off")
    if im.ndim == 2:
        plt.imshow(im, cmap="gray")
    else:
        plt.imshow(cv2.cvtColor(im, cv2.COLOR_RGB2BGR))
    plt.show()
# Sample 10 random test instances and visualise each prediction.
for idx in np.random.choice(np.arange(0, len(testY)), size=(10,)):
    # Classify the digit (predict expects a 2-D batch, hence atleast_2d).
    predicted = clf.predict(np.atleast_2d(testX[idx]))[0]
    # Recover a 28x28 uint8 image from the [0, 1]-scaled feature vector.
    digit_image = (255 * testX[idx]).reshape((28, 28)).astype("uint8")
    # Show the image alongside the actual and predicted labels.
    imshow("Actual digit is {0}, predicted {1}".format(testY[idx], predicted), digit_image)