#!/usr/bin/env python
# coding: utf-8

# Notebook export: demonstrates K-fold cross validation on drum-sample features.
# Loads kick/snare features via the course helper module `stanford_mir`, fits a
# classifier, and reports mean 5-fold cross-validation accuracy.

# In[3]:

import numpy
import scipy
import matplotlib.pyplot as plt
import sklearn
# Explicit submodule imports: `import sklearn` alone does not guarantee these
# submodules are loaded.
import sklearn.linear_model
import sklearn.model_selection
import sklearn.neighbors
import stanford_mir

get_ipython().run_line_magic('matplotlib', 'inline')

# [&larr; Back to Index](index.html)

# # Cross Validation

# K-fold cross validation is a method for evaluating the correctness of a classifier.
#
# For example, with 10-fold cross validation:
#
# 1. Divide the data set into 10 random partitions.
# 2. Choose one of the partitions as the test set. Train on the other nine partitions.
# 3. Repeat for the remaining partitions.

# Load some features from ten kick drums and ten snare drums:

# In[4]:

training_features, training_labels, scaler = stanford_mir.get_features()

# In[13]:

print(training_labels)

# Plot their features:

# In[2]:

plt.scatter(training_features[:, 0], training_features[:, 1])

# Initialize the classifier:

# In[28]:

# Alternative model kept for experimentation:
# model = sklearn.neighbors.KNeighborsClassifier(n_neighbors=3)
model = sklearn.linear_model.LogisticRegression()

# Perform 5-fold cross validation:

# In[29]:

# NOTE: `sklearn.cross_validation` was removed in scikit-learn 0.20;
# `model_selection` is the current home of cross_val_score.
# Fixed: the original referenced an undefined name `random_features` here.
acc = sklearn.model_selection.cross_val_score(
    model, training_features, training_labels, cv=5
)

# In[30]:

print(acc.mean())

# [&larr; Back to Index](index.html)