# Examples taken from here: http://stackoverflow.com/a/1750187
mydoclist = ['Julie loves me more than Linda loves me',
             'Jane likes me more than Julie loves me',
             'He likes basketball more than baseball']
# mydoclist = ['sun sky bright', 'sun sun bright']

from collections import Counter

# Raw per-document term counts, just to illustrate a bag of words.
# Counter(iterable) counts the tokens directly -- no manual += loop needed.
for doc in mydoclist:
    tf_counts = Counter(doc.split())
    print(list(tf_counts.items()))

import string  # NOTE(review): unused -- format() is a builtin; left in place to avoid surprising later code


def build_lexicon(corpus):
    """Return the set of all distinct whitespace-separated tokens in *corpus*."""
    lexicon = set()
    for doc in corpus:
        lexicon.update(doc.split())
    return lexicon


def tf(term, document):
    """Term frequency: raw count of *term* in *document* (delegates to freq)."""
    return freq(term, document)


def freq(term, document):
    """Count occurrences of *term* among the whitespace tokens of *document*."""
    return document.split().count(term)


vocabulary = build_lexicon(mydoclist)

doc_term_matrix = []
print('Our vocabulary vector is [' + ', '.join(list(vocabulary)) + ']')

# enumerate() replaces mydoclist.index(doc)+1, which was O(n) per iteration
# and would report the wrong index if the corpus ever contained duplicates.
for doc_num, doc in enumerate(mydoclist, start=1):
    print('The doc is "' + doc + '"')
    tf_vector = [tf(word, doc) for word in vocabulary]
    tf_vector_string = ', '.join(format(count, 'd') for count in tf_vector)
    print('The tf vector for Document %d is [%s]' % (doc_num, tf_vector_string))
    doc_term_matrix.append(tf_vector)

# here's a test: why did I wrap mydoclist.index(doc)+1 in parens? it returns an int...
# try it!
# `doc` is still bound to the last document from the loop above; this bare
# expression just demonstrates that index+1 is a plain int (the parens in the
# earlier print were only for grouping, not a type change).
type(mydoclist.index(doc) + 1)

print('All combined, here is our master document term matrix: ')
print(doc_term_matrix)

import math
# BUG FIX: numpy was originally imported *after* l2_normalizer and idf were
# defined AND called, so np.sum / np.log raised NameError at runtime.
import numpy as np


def l2_normalizer(vec):
    """Scale *vec* so its Euclidean (L2) norm is 1."""
    denom = np.sum([el ** 2 for el in vec])
    return [(el / math.sqrt(denom)) for el in vec]


doc_term_matrix_l2 = []
for vec in doc_term_matrix:
    doc_term_matrix_l2.append(l2_normalizer(vec))

print('A regular old document term matrix: ')
print(np.matrix(doc_term_matrix))
print('\nA document term matrix with row-wise L2 norms of 1:')
print(np.matrix(doc_term_matrix_l2))

# if you want to check this math, perform the following:
# from numpy import linalg as la
# la.norm(doc_term_matrix[0])
# la.norm(doc_term_matrix_l2[0])


def numDocsContaining(word, doclist):
    """Document frequency: how many documents in *doclist* contain *word*."""
    doccount = 0
    for doc_ in doclist:
        if freq(word, doc_) > 0:
            doccount += 1
    return doccount


def idf(word, doclist):
    """Inverse document frequency: log(N / (1 + df)).

    BUG FIX: the original `n_samples / 1+df` parsed as (n_samples / 1) + df,
    i.e. N + df, not the intended N / (1 + df).
    """
    n_samples = len(doclist)
    df = numDocsContaining(word, doclist)
    return np.log(n_samples / (1 + df))


my_idf_vector = [idf(word, mydoclist) for word in vocabulary]

print('Our vocabulary vector is [' + ', '.join(list(vocabulary)) + ']')
# Loop variable renamed from `freq` so it no longer shadows the freq() function.
print('The inverse document frequency vector is ['
      + ', '.join(format(idf_val, 'f') for idf_val in my_idf_vector) + ']')


def build_idf_matrix(idf_vector):
    """Return a square matrix with *idf_vector* on the diagonal, zeros elsewhere."""
    idf_mat = np.zeros((len(idf_vector), len(idf_vector)))
    np.fill_diagonal(idf_mat, idf_vector)
    return idf_mat


my_idf_matrix = build_idf_matrix(my_idf_vector)
# print(my_idf_matrix)

doc_term_matrix_tfidf = []

# performing tf-idf matrix multiplication
for tf_vector in doc_term_matrix:
    doc_term_matrix_tfidf.append(np.dot(tf_vector, my_idf_matrix))

# normalizing
doc_term_matrix_tfidf_l2 = []
for tf_vector in doc_term_matrix_tfidf:
    doc_term_matrix_tfidf_l2.append(l2_normalizer(tf_vector))

print(vocabulary)
print(np.matrix(doc_term_matrix_tfidf_l2))  # np.matrix() just to make it easier to look at

from sklearn.feature_extraction.text import CountVectorizer

count_vectorizer = CountVectorizer(min_df=1)
term_freq_matrix = count_vectorizer.fit_transform(mydoclist)
# end=' ' reproduces the Python 2 trailing-comma print; the vocabulary itself
# is printed by the next statement.
print('Vocabulary:', end=' ')
# Completes the "Vocabulary:" line started by the print above.
print(count_vectorizer.vocabulary_)

from sklearn.feature_extraction.text import TfidfTransformer

tfidf = TfidfTransformer(norm="l2")
tfidf.fit(term_freq_matrix)
tf_idf_matrix = tfidf.transform(term_freq_matrix)
print(tf_idf_matrix.todense())

# TfidfVectorizer = CountVectorizer + TfidfTransformer in one step.
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf_vectorizer = TfidfVectorizer(min_df=1)
tfidf_matrix = tfidf_vectorizer.fit_transform(mydoclist)
print(tfidf_matrix.todense())

new_docs = ['He watches basketball and baseball',
            'Julie likes to play basketball',
            'Jane loves to play baseball']
# transform() (not fit_transform) keeps the vocabulary learned from mydoclist,
# so unseen words in new_docs are simply ignored.
new_term_freq_matrix = tfidf_vectorizer.transform(new_docs)
print(tfidf_vectorizer.vocabulary_)
print(new_term_freq_matrix.todense())

import os
import csv

# os.chdir('/Users/rweiss/Dropbox/presentations/IRiSS2013/text1/fileformats/')

# BUG FIX: Python 3's csv module requires a text-mode file opened with
# newline='' -- the Python 2 binary mode 'rb' raises an error in DictReader.
with open('amazon/sociology_2010.csv', 'r', newline='') as csvfile:
    amazon_reader = csv.DictReader(csvfile, delimiter=',')
    amazon_reviews = [row['review_text'] for row in amazon_reader]

# your code here!!!