#!/usr/bin/env python
# coding: utf-8

# # Scikit-learn
# Scikit-learn provides simple and efficient tools for data mining and data analysis. It implements a wide range of machine learning algorithms for classification, regression, clustering, dimensionality reduction, and preprocessing.
#
# Library documentation: http://scikit-learn.org/stable/

# ### General

# In[1]:

import numpy as np
from sklearn import datasets
from sklearn import svm

# In[2]:

# import a sample dataset and view the data
digits = datasets.load_digits()
print(digits.data)

# In[3]:

# view the target variable
digits.target

# In[4]:

# train a support vector machine using everything but the last example
classifier = svm.SVC(gamma=0.001, C=100.)
classifier.fit(digits.data[:-1], digits.target[:-1])

# In[5]:

# predict the target of the last example (note the 2D slice)
classifier.predict(digits.data[-1:])

# In[6]:

# persist the model and reload
import pickle
import joblib

joblib.dump(classifier, 'model.pkl')
classifier2 = joblib.load('model.pkl')
classifier2.predict(digits.data[-1:])

# In[7]:

import os
os.remove('model.pkl')

# In[8]:

# another example with the digits data set
svc = svm.SVC(C=1, kernel='linear')
svc.fit(digits.data[:-100], digits.target[:-100]).score(digits.data[-100:], digits.target[-100:])

# In[9]:

# perform cross-validation on the estimator's predictions
from sklearn.model_selection import KFold, cross_val_score

k_fold = KFold(n_splits=3)
for train_indices, test_indices in k_fold.split(np.arange(6)):
    print('Train: %s | test: %s' % (train_indices, test_indices))

# In[10]:

# apply to the model
kfold = KFold(n_splits=3)
cross_val_score(svc, digits.data, digits.target, cv=kfold, n_jobs=-1)

# In[11]:

# use grid search (GridSearchCV) to optimize model parameters
from sklearn.model_selection import GridSearchCV

gammas = np.logspace(-6, -1, 10)
classifier = GridSearchCV(estimator=svc, param_grid=dict(gamma=gammas), n_jobs=-1)
classifier.fit(digits.data[:1000], digits.target[:1000])

# In[12]:

classifier.best_score_

# In[13]:

classifier.best_estimator_.gamma

# In[14]:

# run against the test set
classifier.score(digits.data[1000:], digits.target[1000:])

# In[15]:

# nested cross-validation example
cross_val_score(classifier, digits.data, digits.target)

# ### Other Classifiers

# In[16]:

# import the iris dataset
iris = datasets.load_iris()

# In[17]:

# k nearest neighbors
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier()
knn.fit(iris.data, iris.target)

# In[18]:

# decision tree
from sklearn.tree import DecisionTreeClassifier

dtree = DecisionTreeClassifier()
dtree.fit(iris.data, iris.target)

# In[19]:

# stochastic gradient descent
from sklearn.linear_model import SGDClassifier

sgd = SGDClassifier(loss="hinge", penalty="l2")
sgd.fit(iris.data, iris.target)

# In[20]:

# naive bayes
from sklearn.naive_bayes import GaussianNB

gnb = GaussianNB()
y_pred = gnb.fit(iris.data, iris.target).predict(iris.data)
print("Number of mislabeled points : %d" % (iris.target != y_pred).sum())

# ### Regression

# In[21]:

# load another sample dataset
diabetes = datasets.load_diabetes()

# In[22]:

# linear regression
from sklearn import linear_model

regr = linear_model.LinearRegression()
regr.fit(diabetes.data, diabetes.target)

# In[23]:

# regression coefficients
print(regr.coef_)

# In[24]:

# mean squared error on the training data
np.mean((regr.predict(diabetes.data) - diabetes.target) ** 2)

# In[25]:

# coefficient of determination (R^2) on the training data
regr.score(diabetes.data, diabetes.target)
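
# The error and R^2 above are computed on the same data the model was fit on,
# so they can be optimistic. As a small sketch (not part of the original
# notebook), the model can also be scored on a held-out split; the 80/20 split
# and random_state below are arbitrary choices.

# In[ ]:

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    diabetes.data, diabetes.target, test_size=0.2, random_state=0)
held_out = linear_model.LinearRegression().fit(X_train, y_train)
held_out.score(X_test, y_test)  # R^2 on unseen data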

# In[26]:

# ridge regression
regr = linear_model.Ridge(alpha=.1)
regr.fit(diabetes.data, diabetes.target)

# In[27]:

# lasso regression
regr = linear_model.Lasso()
regr.fit(diabetes.data, diabetes.target)

# In[28]:

# logistic regression (this is actually a classifier)
iris = datasets.load_iris()
logistic = linear_model.LogisticRegression(C=1e5)
logistic.fit(iris.data, iris.target)

# ### Preprocessing

# In[29]:

# feature scaling (zero mean, unit variance per column)
from sklearn import preprocessing

X = np.array([[ 1., -1.,  2.],
              [ 2.,  0.,  0.],
              [ 0.,  1., -1.]])
X_scaled = preprocessing.scale(X)

# In[30]:

# save the scaling transform to apply to new data later
scaler = preprocessing.StandardScaler().fit(X)
scaler

# In[31]:

scaler.transform(X)

# In[32]:

# range scaling
min_max_scaler = preprocessing.MinMaxScaler()
X_minmax = min_max_scaler.fit_transform(X)
X_minmax

# In[33]:

# instance normalization using the L2 norm
X_normalized = preprocessing.normalize(X, norm='l2')
X_normalized

# In[34]:

# category encoding
enc = preprocessing.OneHotEncoder()
enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
enc.transform([[0, 1, 3]]).toarray()

# In[35]:

# binarization (threshold each feature at 0)
binarizer = preprocessing.Binarizer().fit(X)
binarizer.transform(X)

# ### Clustering

# In[36]:

# k-means clustering
from sklearn import cluster

k_means = cluster.KMeans(n_clusters=3)
k_means.fit(iris.data)

# ### Decomposition

# In[37]:

# create a signal with only 2 useful dimensions
x1 = np.random.normal(size=100)
x2 = np.random.normal(size=100)
x3 = x1 + x2
X = np.c_[x1, x2, x3]

# In[38]:

# compute principal component analysis
from sklearn import decomposition

pca = decomposition.PCA()
pca.fit(X)

# In[39]:

pca.explained_variance_

# In[40]:

# only the first 2 components are useful
pca.n_components = 2
X_reduced = pca.fit_transform(X)
X_reduced.shape

# In[41]:

# generate more sample data
time = np.linspace(0, 10, 2000)
s1 = np.sin(2 * time)             # signal 1: sinusoidal signal
s2 = np.sign(np.sin(3 * time))    # signal 2: square signal
S = np.c_[s1, s2]
S += 0.2 * np.random.normal(size=S.shape)  # add noise
S /= S.std(axis=0)                # standardize data

# In[42]:

# mix data
A = np.array([[1, 1], [0.5, 2]])  # mixing matrix
X = np.dot(S, A.T)                # generate observations

# In[43]:

# compute independent component analysis
ica = decomposition.FastICA()
S_ = ica.fit_transform(X)   # get the estimated sources
A_ = ica.mixing_.T          # estimated mixing matrix (transposed)
np.allclose(X, np.dot(S_, A_) + ica.mean_)
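
# ICA recovers the sources only up to sign, scale, and ordering, so the
# estimated components cannot be compared to S element-wise. As a small
# illustrative check (not in the original notebook), the absolute correlation
# between true and estimated sources can be inspected; each estimated
# component should line up strongly with exactly one original signal.

# In[ ]:

# rows: true sources s1, s2; columns: estimated sources
corr = np.corrcoef(S.T, S_.T)
print(np.round(np.abs(corr[:2, 2:]), 2))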