#!/usr/bin/env python
# coding: utf-8

# In[1]:


get_ipython().system('hostname')

# # K-Means Clustering

# So far this semester we have been working with *supervised* and *reinforcement* learning algorithms. Another family of machine learning algorithms is *unsupervised* learning. These are algorithms designed to find patterns or groupings in a data set. No targets, or desired outputs, are involved.

# ## Old Faithful Dataset
# 
# For example, take a look at this data set of eruption durations and the waiting times in between [eruptions of the Old Faithful Geyser](https://www.kaggle.com/janithwanni/old-faithful) in Yellowstone National Park.

# In[2]:


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


# In[3]:


get_ipython().system('head faithful.csv')


# In[4]:


datadf = pd.read_csv('faithful.csv', usecols=(1, 2))
datadf


# In[5]:


data = datadf.values
data = np.array(data)
type(data), data[:10]


# In[6]:


plt.plot(data[:, 0], data[:, 1], '.')
plt.xlabel('duration')
plt.ylabel('interval')


# We can clearly see two clusters here. For higher dimensional data, we cannot directly visualize the data to see the clusters. We need a mathematical way to detect clusters. This gives rise to the class of unsupervised learning methods called *clustering* algorithms.
# 
# A simple example of a clustering algorithm is the *k-means* algorithm. It results in identifying $k$ cluster centers. It is an iterative algorithm that starts with an initial assignment of $k$ centers. Then it proceeds by determining which center each data sample is closest to and adjusting the centers to be the means of each of these data partitions. It then repeats.
# 
# Let's develop this algorithm one step at a time.

# Each sample in the Old Faithful data has 2 attributes, so each sample is in 2-dimensional space. We know by looking at the above plot that our data nicely falls in two clusters, so we will start with $k=2$. We will initialize the two cluster centers by randomly choosing two of the data samples.

# In[10]:


n_samples = data.shape[0]
np.random.choice(range(n_samples), 2, replace=False)


# In[12]:


centers = data[np.random.choice(range(n_samples), 2, replace=False), :]
centers


# Now we must find all samples that are closest to the first center, and all those that are closest to the second center.

# In[13]:


a = np.array([1, 2, 3])
b = np.array([10, 20, 30])
a, b


# In[14]:


a - b


# But what if we want to subtract every element of `b` from every element of `a`?

# In[15]:


np.resize(a, (3, 3))


# In[16]:


np.resize(b, (3, 3))


# In[17]:


np.resize(a, (3, 3)).T


# In[18]:


np.resize(a, (3, 3)).T - np.resize(b, (3, 3))


# Rather than duplicating the values ourselves with `np.resize`, we can ask numpy to do this duplication for us (this is called *broadcasting*) if we reshape `a` to be a column vector and leave `b` as a row vector.
# 
# $$ \begin{pmatrix}
# 1\\
# 2\\
# 3
# \end{pmatrix}
# -
# \begin{pmatrix}
# 10 & 20 & 30
# \end{pmatrix}
# \;\; = \;\;
# \begin{pmatrix}
# 1 & 1 & 1\\
# 2 & 2 & 2\\
# 3 & 3 & 3
# \end{pmatrix}
# -
# \begin{pmatrix}
# 10 & 20 & 30\\
# 10 & 20 & 30\\
# 10 & 20 & 30
# \end{pmatrix}
# $$

# In[19]:


a[:, np.newaxis]


# In[20]:


a[:, np.newaxis] - b


# Now imagine that `a` is a cluster center and `b` contains data samples, one per row. The first step of calculating the distance from `a` to all samples in `b` is to subtract them component-wise.

# In[21]:


a = np.array([1, 2, 3])
b = np.array([[10, 20, 30],
              [40, 50, 60]])
print(a)
print(b)


# In[22]:


b - a


# The single row vector `a` is duplicated for as many rows as there are in `b`! We can use this to calculate the squared distance between a center and every sample.
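# Before applying this to our cluster centers, here is a quick sanity check (a sketch added for illustration, not part of the original notebook's flow) that the broadcasting expression gives the same squared distances as an explicit loop. The names `check_center`, `broadcast_sqdists`, and `loop_sqdists` are made up for this example.

# In[ ]:


check_center = centers[0, :]                                      # one cluster center, shape (2,)
broadcast_sqdists = np.sum((check_center - data)**2, axis=1)      # squared distance to every sample, via broadcasting
loop_sqdists = np.array([np.sum((check_center - x)**2) for x in data])  # same quantity, one sample at a time
print(np.allclose(broadcast_sqdists, loop_sqdists))               # expect True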
# In[23]:


centers[0, :]


# In[24]:


sqdists_to_center_0 = np.sum((centers[0, :] - data)**2, axis=1)
sqdists_to_center_0


# In[25]:


sqdists_to_center_1 = np.sum((centers[1, :] - data)**2, axis=1)
sqdists_to_center_1


# And, which samples are closest to the first center?

# In[26]:


sqdists_to_center_0 < sqdists_to_center_1


# This approach is easy for $k=2$, but what if $k$ is larger? Can we calculate all of the needed distances in one `numpy` expression? I bet we can!

# In[27]:


centers[:, np.newaxis, :].shape, data.shape


# In[28]:


(centers[:, np.newaxis, :] - data).shape


# In[29]:


np.sum((centers[:, np.newaxis, :] - data)**2, axis=-1).shape


# In[30]:


data.shape


# These are the squared distances between each of our two centers and each of the 272 samples. If we take the `argmin` across the two rows, we will have the index of the closest center for each of the 272 samples.

# In[31]:


clusters = np.argmin(np.sum((centers[:, np.newaxis, :] - data)**2, axis=2), axis=0)
clusters


# Now, to calculate the new values of our two centers, we just calculate the mean of the appropriate samples.

# In[32]:


data[clusters == 0, :].mean(axis=0)


# In[33]:


data[clusters == 1, :].mean(axis=0)


# We can do both in a for loop.

# In[35]:


k = 2
for i in range(k):
    centers[i, :] = data[clusters == i, :].mean(axis=0)


# In[36]:


centers


# Now we can wrap these steps in our first version of a `kmeans` function.

# In[39]:


def kmeans(data, k=2, n_iterations=5):
    # Initial centers
    centers = data[np.random.choice(range(data.shape[0]), k, replace=False), :]
    # Repeat n times
    for iteration in range(n_iterations):
        # Which center is each sample closest to?
        closest = np.argmin(np.sum((centers[:, np.newaxis, :] - data)**2, axis=2), axis=0)
        # Update cluster centers
        for i in range(k):
            centers[i, :] = data[closest == i, :].mean(axis=0)
    return centers


# In[40]:


kmeans(data, 2, 5)


# In[41]:


kmeans(data, 2)


# We need a measure of the quality of our clustering. For this, we define $J$, which is a performance measure being minimized by *k-means*. It is defined as
# $$
# J = \sum_{n=1}^N \sum_{k=1}^K r_{nk} ||\mathbf{x}_n - \mathbf{\mu}_k||^2
# $$
# where $N$ is the number of samples, $K$ is the number of cluster centers, $\mathbf{x}_n$ is the $n^{th}$ sample and $\mathbf{\mu}_k$ is the $k^{th}$ center, each being an element of $\mathbf{R}^p$ where $p$ is the dimensionality of the data. $r_{nk}$ is 1 if $\mathbf{x}_n$ is closest to center $\mathbf{\mu}_k$, and 0 otherwise.
# 
# The sums can be computed using python *for* loops, but, as you know, *for* loops are much slower than matrix operations in python, so let's do the matrix magic. We already know how to calculate the difference between all samples and all centers.

# In[42]:


sqdists = np.sum((centers[:, np.newaxis, :] - data)**2, axis=2)
sqdists.shape


# The calculation of $J$ requires us to multiply each squared distance by $r_{nk}$. Since $r_{nk}$ just selects the distance to the closest center, and we already have all of the squared distances, we can simply sum up the minimum distance for each sample.

# In[43]:


np.min(sqdists, axis=0)


# In[44]:


np.sum(np.min(sqdists, axis=0))


# Let's define a function named *calcJ* to do this calculation.

# In[45]:


def calcJ(data, centers):
    sqdists = np.sum((centers[:, np.newaxis, :] - data)**2, axis=2)
    return np.sum(np.min(sqdists, axis=0))


# In[46]:


calcJ(data, centers)


# Now we can add this calculation to track the value of $J$ for each iteration as a kind of learning curve. $J$ measures the total within-cluster spread, the sum of squared distances from each sample to its closest center, so the smaller it is, the better.
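# Before building $J$ into `kmeans`, here is a quick check (a sketch added for illustration, not part of the original notebook) that the vectorized `calcJ` agrees with the double-sum definition of $J$ written with explicit loops. The names `J_loops` and `closest_k` are made up for this example.

# In[ ]:


K, N = centers.shape[0], data.shape[0]
J_loops = 0.0
for n in range(N):
    # r_nk is 1 only for the closest center, so only that squared distance contributes.
    closest_k = np.argmin([np.sum((data[n] - centers[kk])**2) for kk in range(K)])
    J_loops += np.sum((data[n] - centers[closest_k])**2)
print(J_loops, calcJ(data, centers))   # the two values should agree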
# In[51]:


def kmeans(data, k, n_iterations):

    # Initialize centers and list J to track performance metric
    centers = data[np.random.choice(range(data.shape[0]), k, replace=False), :]
    J = []

    for iteration in range(n_iterations):

        # Which center is each sample closest to?
        sqdistances = np.sum((centers[:, np.newaxis, :] - data)**2, axis=2)
        closest = np.argmin(sqdistances, axis=0)

        # Calculate J and append to list J
        J.append(calcJ(data, centers))

        # Update cluster centers
        for i in range(k):
            centers[i, :] = data[closest == i, :].mean(axis=0)

    # Calculate J one final time and return results
    J.append(calcJ(data, centers))
    return centers, J, closest


# In[52]:


centers, J, closest = kmeans(data, 2, 10)


# In[53]:


J


# In[54]:


plt.plot(J);


# In[55]:


centers, J, closest = kmeans(data, 2, 10)
plt.plot(J);


# In[56]:


centers


# In[57]:


closest


# In[58]:


centers, J, closest = kmeans(data, 2, 2)

plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.scatter(data[:, 0], data[:, 1], s=80, c=closest, alpha=0.5)
plt.scatter(centers[:, 0], centers[:, 1], s=80, c="green", marker='D')
plt.subplot(1, 2, 2)
plt.plot(J)

centers


# Let's try for more iterations.

# In[59]:


centers, J, closest = kmeans(data, 2, 10)

plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.scatter(data[:, 0], data[:, 1], s=80, c=closest, alpha=0.5)
plt.scatter(centers[:, 0], centers[:, 1], s=80, c="green", marker='D')
plt.subplot(1, 2, 2)
plt.plot(J)

centers


# Now, how about three centers, so $k=3$?

# In[60]:


centers, J, closest = kmeans(data, 3, 10)

plt.figure(figsize=(15, 8))
plt.subplot(1, 2, 1)
plt.scatter(data[:, 0], data[:, 1], s=80, c=closest, alpha=0.5)
plt.scatter(centers[:, 0], centers[:, 1], s=80, c="green", marker='D')
plt.subplot(1, 2, 2)
plt.plot(J)

centers


# Or four?

# In[61]:


centers, J, closest = kmeans(data, 4, 10)

plt.figure(figsize=(15, 8))
plt.subplot(1, 2, 1)
plt.scatter(data[:, 0], data[:, 1], s=80, c=closest, alpha=0.5)
plt.scatter(centers[:, 0], centers[:, 1], s=80, c="green", marker='D')
plt.subplot(1, 2, 2)
plt.plot(J)

centers


# Or six centers?

# In[63]:


centers, J, closest = kmeans(data, 6, 20)

plt.figure(figsize=(15, 8))
plt.subplot(1, 2, 1)
plt.scatter(data[:, 0], data[:, 1], s=80, c=closest, alpha=0.5)
plt.scatter(centers[:, 0], centers[:, 1], s=80, c="green", marker='D')
plt.subplot(1, 2, 2)
plt.plot(J)

centers


# ## MNIST Dataset
# 
# So, clustering two-dimensional data is not all that exciting. How about 784-dimensional data, such as our good buddy the MNIST data set?

# In[64]:


import gzip
import pickle

with gzip.open('mnist.pkl.gz', 'rb') as f:
    train_set, valid_set, test_set = pickle.load(f, encoding='latin1')

Xtrain = train_set[0]
Ttrain = train_set[1].reshape((-1, 1))

Xtest = test_set[0]
Ttest = test_set[1].reshape((-1, 1))

Xtrain.shape, Ttrain.shape, Xtest.shape, Ttest.shape


# How many clusters shall we use?

# In[65]:


centers, J, closest = kmeans(Xtrain, k=10, n_iterations=10)


# In[66]:


plt.plot(J);


# In[67]:


centers.shape


# In[68]:


for i in range(10):
    plt.subplot(2, 5, i + 1)
    plt.imshow(-centers[i, :].reshape((28, 28)), cmap='gray')
    plt.axis('off')
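# Each center is an average of many images, so the centers look like blurry prototype digits. As an aside (a sketch added here, not part of the original notebook), we can peek at how well these 10 clusters line up with the 10 digit classes by finding the most common training label in each cluster and the fraction of the cluster it accounts for. The names `majority_digit` and `purity` are made up for this example.

# In[ ]:


for cluster_i in range(10):
    labels_in_cluster = Ttrain[closest == cluster_i, 0]
    if len(labels_in_cluster) == 0:
        print(f'cluster {cluster_i}: empty')
        continue
    digits, counts = np.unique(labels_in_cluster, return_counts=True)
    majority_digit = digits[np.argmax(counts)]
    purity = counts.max() / counts.sum()
    print(f'cluster {cluster_i}: majority digit {majority_digit}, purity {purity:.2f}')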
# Try more iterations.

# In[69]:


centers, J, closest = kmeans(Xtrain, k=10, n_iterations=20)

plt.plot(J)

plt.figure()
for i in range(10):
    plt.subplot(2, 5, i + 1)
    plt.imshow(-centers[i, :].reshape((28, 28)), cmap='gray')
    plt.axis('off')


# And more centers.

# In[70]:


centers, J, closest = kmeans(Xtrain, k=20, n_iterations=20)

plt.plot(J)

plt.figure()
for i in range(20):
    plt.subplot(4, 5, i + 1)
    plt.imshow(-centers[i, :].reshape((28, 28)), interpolation='nearest', cmap='gray')
    plt.axis('off')


# Try that again. Do the cluster centers differ?

# In[71]:


centers, J, closest = kmeans(Xtrain, k=20, n_iterations=20)

plt.plot(J)

plt.figure()
for i in range(20):
    plt.subplot(4, 5, i + 1)
    plt.imshow(-centers[i, :].reshape((28, 28)), interpolation='nearest', cmap='gray')
    plt.axis('off')


# Maybe more clusters will help. Do we see the expected variations for each digit?

# In[72]:


centers, J, closest = kmeans(Xtrain, k=40, n_iterations=20)

plt.plot(J)

plt.figure()
for i in range(40):
    plt.subplot(4, 10, i + 1)
    plt.imshow(-centers[i, :].reshape((28, 28)), interpolation='nearest', cmap='gray')
    plt.axis('off')


# In[73]:


centers, J, closest = kmeans(Xtrain, k=40, n_iterations=20)

plt.plot(J)

plt.figure()
for i in range(40):
    plt.subplot(4, 10, i + 1)
    plt.imshow(-centers[i, :].reshape((28, 28)), interpolation='nearest', cmap='gray')
    plt.axis('off')


# Hmm. Some of these look pretty fuzzy. Let's see how many samples are in each cluster. Show the counts in the titles.

# In[74]:


(closest == 0).sum()


# In[75]:


for i in range(40):
    plt.subplot(4, 10, i + 1)
    plt.imshow(-centers[i, :].reshape((28, 28)), interpolation='nearest', cmap='gray')
    plt.title(str((closest == i).sum()))
    plt.axis('off')


# How could you use the results of the `kmeans` clustering algorithm as the first step in a classification algorithm?

# # K-Nearest-Neighbor Classification

# Now that we have some experience in calculating distances between samples, we are a short step away from an implementation of a common classification algorithm called *k-nearest-neighbor*. This is a non-parametric algorithm, meaning that it does not involve parameters, like weights, to make its decisions. Instead, we could call it a memory-based method. The algorithm classifies a sample by determining the $k$ closest samples in the training set and returning the most common class label among those $k$ nearest samples.

# Training is terribly simple. We just have to store the training samples. Classification is also trivial to code. We just calculate squared distances between training samples and the samples being classified and return the most common class label among the $k$ closest training samples.
# 
# Let's create a class named `KNN` to implement this algorithm.

# First, let's practice our `numpy`-fu to see how to pick the most common class with a minimum amount of code.
# 
# Remember that `sqdists` from above is `n_centers` x `n_samples`.
# 
# Let's try to classify the first three MNIST test samples.

# In[76]:


sqdists = np.sum((Xtest[:3, np.newaxis, :] - Xtrain)**2, axis=2)
sqdists.shape


# Okay. Now all we have to do is find the $k$ smallest distances in each row. Let's use $k=5$.

# In[77]:


k = 5
np.sort(sqdists[0, :])[:k]


# But we need the indices of these values so we can look up their class labels in `Ttrain`.

# In[78]:


k = 5
np.argsort(sqdists[0, :])[:k]


# Now we have to do this for each row in `sqdists`. Or do we? Wouldn't it be nice if `np.argsort` sorted each row independently, so we could do this in one function call?

# In[79]:


np.sort(sqdists, axis=1)


# Yippee!
# In[80]:


np.argsort(sqdists, axis=1)


# In[81]:


indices = np.argsort(sqdists, axis=1)
indices


# In[82]:


np.squeeze(Ttrain[indices, :]).shape


# In[83]:


plt.imshow(-Xtest[2, :].reshape(28, 28), cmap='gray')
plt.axis('off')


# In[84]:


Ttrain[indices, :][:, :, 0]


# In[85]:


np.unique(Ttrain[indices, :][:, :, 0][:, :40], axis=1, return_counts=True)


# Cool! Now we just have to take the first $k$ columns of these and determine the most common label across the columns, for each row. We can use `scipy.stats.mode` for this!

# In[86]:


import scipy.stats as ss

ss.mode([1, 2, 3, 4, 2, 2, 2])  # , keepdims=True)


# In[87]:


ss.mode(Ttrain[indices, :][:, :, 0][:, :10], axis=1)  # , keepdims=True)


# In[88]:


Ttrain[indices, :][:, :, 0][:, :10]


# In[89]:


Ttest[:3]


# Well, maybe we will do better with different values of $k$.

# Finally, we can now define our `KNN` class.

# In[90]:


import numpy as np
import scipy.stats as ss  # for ss.mode


class KNN():

    def __init__(self):
        self.X = None  # data will be stored here
        self.T = None  # class labels will be stored here
        self.Xmeans = None
        self.Xstds = None

    def train(self, X, T):
        if self.Xmeans is None:
            self.Xmeans = X.mean(axis=0)
            self.Xstds = X.std(axis=0)
            self.Xstds[self.Xstds == 0] = 1
        self.X = self._standardizeX(X)
        self.T = T

    def _standardizeX(self, X):
        return (X - self.Xmeans) / self.Xstds

    def use(self, Xnew, k=1):
        self.k = k
        # Calculate squared distances from all samples in Xnew to all samples stored in self.X
        sqdists = np.sum(
            (self._standardizeX(Xnew)[:, np.newaxis, :] - self.X)**2,
            axis=-1
        )
        # sqdists is now n_new_samples x n_train_samples
        # Sort each row of squared distances from smallest to largest and select the first k.
        indices = np.argsort(sqdists, axis=1)[:, :k]
        # Determine the most common class label in each row.
        classes = ss.mode(self.T[indices, :][:, :, 0], axis=1)[0]
        return classes


# In[91]:


knn = KNN()
knn


# Oh, can't have that!!

# In[92]:


import numpy as np
import scipy.stats as ss  # for ss.mode


class KNN():

    def __init__(self):
        self.X = None  # data will be stored here
        self.T = None  # class labels will be stored here
        self.Xmeans = None
        self.Xstds = None

    def __repr__(self):
        if self.X is None:
            return 'KNN() has not been trained.'
        else:
            return f'KNN(), trained with {self.X.shape[0]} samples having class labels {np.unique(self.T)}.'

    def train(self, X, T):
        if self.Xmeans is None:
            self.Xmeans = X.mean(axis=0)
            self.Xstds = X.std(axis=0)
            self.Xstds[self.Xstds == 0] = 1
        self.X = self._standardizeX(X)
        self.T = T
        return self

    def _standardizeX(self, X):
        return (X - self.Xmeans) / self.Xstds

    def use(self, Xnew, k=1):
        if self.X is None:
            raise Exception('KNN object has not been trained yet.')
        self.k = k
        # Calculate squared distances from all samples in Xnew to all samples stored in self.X
        sqdists = np.sum(
            (self._standardizeX(Xnew)[:, np.newaxis, :] - self.X)**2,
            axis=-1
        )
        # sqdists is now n_new_samples x n_train_samples
        # Sort each row of squared distances from smallest to largest and select the first k.
        indices = np.argsort(sqdists, axis=1)[:, :k]
        # Determine the most common class label in each row.
        classes = ss.mode(self.T[indices, :][:, :, 0], axis=1)[0]
        return classes


# In[93]:


knn = KNN()
knn


# In[94]:


knn.train(Xtrain, Ttrain)


# Boy, that took a long time to train! :) 200 ms.

# Let's test it. First, use the default value for $k$ of 1.

# In[95]:


knn.use(Xtest[:3, :])


# In[96]:


Ttest[:3]


# Well, that worked perfectly. Let's try more test samples.

# In[97]:


knn.use(Xtest[:10, :])


# In[98]:


Ttest[:10]


# There are some mistakes.
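# Which of those ten predictions disagree with the labels? A quick way to check (a sketch added for illustration; `predicted` is a made-up name) is to compare the predictions with the labels directly.

# In[ ]:


predicted = knn.use(Xtest[:10, :]).reshape(-1)
np.where(predicted != Ttest[:10].reshape(-1))[0]   # indices of the misclassified test samples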
# In[99]:


plt.imshow(-Xtest[8, :].reshape(28, 28), cmap='gray')
plt.axis('off')


# How about using more neighbors?

# In[100]:


knn.use(Xtest[:10, :], k=7)


# In[101]:


Ttest[:10]


# In[102]:


def percent_correct(Predicted, T):
    # Flatten both arrays so the comparison is element-wise regardless of shape.
    return 100 * np.mean(Predicted.reshape(-1) == T.reshape(-1))


# In[103]:


percent_correct(knn.use(Xtest[:10, :], k=5), Ttest[:10])


# Now we can try multiple values of $k$ with a for loop, and test all test samples.

# In[91]:


# pc = []
# for k in range(1, 5):
#     print(k, end=' ')
#     pc_k = percent_correct(knn.use(Xtest, k=k), Ttest)
#     pc.append([k, pc_k])


# Python kernel died.
# 
# Well, here is what we often face when dealing with big data sets. K-nearest-neighbors calculates the squared distance between every pair of training and test samples. That distance matrix can get huge.
# 
# We can deal with this in the typical way, by working with batches of data.

# In[107]:


n_train = 5000   # To reduce computation time

knn = KNN()
knn.train(Xtrain[:n_train, :], Ttrain[:n_train, :])

batch_size = 500
n_samples = Xtest.shape[0]

results = []
for k in [1, 2, 5, 10, 20]:
    n_correct = 0
    for first in range(0, n_samples, batch_size):
        X = Xtest[first:first + batch_size, :]
        T = Ttest[first:first + batch_size, :]
        n_correct += np.sum(knn.use(X, k=k).reshape(-1) == T.reshape(-1))
    pc = n_correct / n_samples * 100
    results.append([k, pc])
    print(results[-1])


# In[108]:


results


# In[109]:


results = np.array(results)
plt.plot(results[:, 0], results[:, 1])
plt.xlabel('$k$')
plt.ylabel('Percent Correct Test Data');


# How might you change the implementation of `KNN` to speed up the calculation for multiple values of $k$?
# 
# What might you change to speed up the calculation for a single $k$ value? (Hint: ever heard of a [kd-tree](https://johnlekberg.com/blog/2020-04-17-kd-tree.html)?)
# 
# How could you calculate class probabilities with `KNN`?

# ## Comparison to Neural Network Classifier

# In[110]:


n = 20
X = np.random.multivariate_normal([5, 7], [[0.8, -0.5], [-0.5, 0.8]], n)
X = np.vstack((X, np.random.multivariate_normal([6, 3], [[0.6, 0.5], [0.5, 0.8]], n)))
T = np.vstack((np.ones((n, 1)), 2 * np.ones((n, 1))))

plt.scatter(X[:, 0], X[:, 1], c=T, s=80);


# In[111]:


plt.figure(figsize=(8, 8))

n = 20
X = np.random.multivariate_normal([5, 7], [[0.8, -0.5], [-0.5, 0.8]], n)
X = np.vstack((X, np.random.multivariate_normal([6, 3], [[0.6, 0.5], [0.5, 0.8]], n)))
T = np.vstack((np.ones((n, 1)), 2 * np.ones((n, 1))))

# Make samples as coordinates of grid points across 2-dimensional data space
m = 100
xs = np.linspace(0, 10, m)
ys = xs
Xs, Ys = np.meshgrid(xs, ys)
samples = np.vstack((Xs.ravel(), Ys.ravel())).T

knn = KNN()
knn.train(X, T)
classes = knn.use(samples, k=1)

plt.contourf(Xs, Ys, classes.reshape(Xs.shape), 1, colors=('blue', 'red'), alpha=0.2)
plt.scatter(X[:, 0], X[:, 1], s=60, c=T);


# Ooo, that's a cool plot. Let's show similar plots for the classifiers we have studied so far, including LDA, QDA, k-NN, and neural nets.
# In[112]:


def plot_result(X, Xs, Ys, classes):
    # Note: the scatter colors use the global T defined above.
    plt.contourf(Xs, Ys, classes.reshape(Xs.shape), 1, colors=('blue', 'red'), alpha=0.2)
    plt.scatter(X[:, 0], X[:, 1], s=60, c=T)


# In[115]:


import neuralnetworksA4 as nn
import qdalda


# In[116]:


n = 40
X = np.random.multivariate_normal([5, 6], [[0.9, -0.2], [-0.2, 0.9]], n)
X = np.vstack((X, np.random.multivariate_normal([6, 3], [[2, 0.4], [0.4, 2]], n)))
T = np.vstack((np.ones((n, 1)), 2 * np.ones((n, 1))))

m = 100
xs = np.linspace(0, 10, m)
ys = xs
Xs, Ys = np.meshgrid(xs, ys)
samples = np.vstack((Xs.ravel(), Ys.ravel())).T

plt.figure(figsize=(20, 30))

# Create and train Quadratic Discriminant Analysis (QDA)
# and Linear Discriminant Analysis (LDA) Classifiers
qda = qdalda.QDA()
qda.train(X, T)
lda = qdalda.LDA()
lda.train(X, T)

# Create and train k-nearest-neighbor (KNN) classifier
knn = KNN()
knn.train(X, T)

ploti = 0

# Use and plot results for LDA and QDA
ploti += 1
plt.subplot(5, 3, ploti)
classes = lda.use(samples)
plot_result(X, Xs, Ys, classes)
plt.title('LDA')

ploti += 1
plt.subplot(5, 3, ploti)
classes = qda.use(samples)
plot_result(X, Xs, Ys, classes)
plt.title('QDA')

ploti += 1

# Use and plot results for KNN with various values of k
for k in [1, 2, 3, 5, 10, 20]:
    ploti += 1
    plt.subplot(5, 3, ploti)
    classes = knn.use(samples, k)
    plot_result(X, Xs, Ys, classes)
    plt.title(f'KNN k={k}')

# Use and plot results for neural networks with various hidden layer structures
for n_hiddens in [[], [1], [2], [10], [10, 10], [5, 5, 5, 5]]:
    ploti += 1
    plt.subplot(5, 3, ploti)
    nnet = nn.NeuralNetworkClassifier(2, n_hiddens, 2)
    nnet.train(X, T, X, T, n_epochs=1000, method='scg', verbose=False)
    classes, _ = nnet.use(samples)
    plot_result(X, Xs, Ys, classes)
    plt.title(f'nnet {n_hiddens}')


# In[ ]:
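# Returning to one of the questions posed earlier: how could you calculate class probabilities with `KNN`? One possible approach (just a sketch, not the only way) is to count how often each class label appears among the $k$ nearest neighbors and divide by $k$. The function name `knn_class_probabilities` is made up for this illustration; it reuses the standardized training data and labels stored inside a trained `KNN` object.

def knn_class_probabilities(knn, Xnew, k=5):
    # Squared distances from each new sample to every stored training sample.
    sqdists = np.sum((knn._standardizeX(Xnew)[:, np.newaxis, :] - knn.X)**2, axis=-1)
    # Class labels of the k nearest training samples for each new sample.
    neighbor_labels = knn.T[np.argsort(sqdists, axis=1)[:, :k], 0]
    classes = np.unique(knn.T)
    # Fraction of the k neighbors belonging to each class, one row per new sample.
    probs = np.array([(neighbor_labels == c).mean(axis=1) for c in classes]).T
    return classes, probs


knn_class_probabilities(knn, samples[:5, :], k=5)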