#!/usr/bin/env python
# coding: utf-8

# # Analysis of the clusters
#
# Here is the streamlined code that shows just the analysis, without the blind alleys I encountered before.

# In[1]:

# general imports
import numpy as np
import scipy
from matplotlib import pyplot as plt
get_ipython().run_line_magic('pylab', 'inline')
import pandas as pd
import MySQLdb
import os
import sys
sys.setrecursionlimit(3000)

# In[4]:

# user, passwd, dbname and host are credentials defined outside this notebook
con = MySQLdb.connect(user=user, passwd=passwd, db=dbname, host=host)
df = pd.read_sql("SELECT * FROM business WHERE state ='AZ' ", con)
print "data loaded"

# The low activity of some businesses in their particular cluster caused their Location Quotient to come out as inf, because of floating-point round-off.
# The trick lies in the LQ equation. Here it is again.
#
# $$LQ_{ij}=\frac{\frac{E_{ij}}{E_i}}{\frac{\sum_i E_{ij}}{\sum_i E_i}}$$
#
# where
# $E_{ij}$ is the economic activity in subarea $i$, department $j$,
# $E_i$ is the total economic activity in subarea $i$,
# $\sum_i E_{ij}$ is the economic activity of department $j$ in the whole area,
# $\sum_i E_i$ is the total economic activity in the whole area.
#
# When a business has low activity, the true value of the numerator is of the order of $10^{-6}$. That is small enough that somewhere along the computation it gets rounded to zero, and the subsequent division produces infinity. This is a problem for me because, in reality, those businesses are not the most popular ones; they are the least popular in the cluster.
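# To see the mechanism in isolation, here is a minimal floating-point sketch (generic made-up numbers, not the actual LQ pipeline): once an intermediate value underflows to zero, any subsequent division by it produces inf.

# In[ ]:

# Generic underflow sketch, not the LQ pipeline itself: the true product below
# is 1e-52, which is below the smallest representable float32 and rounds to 0.0.
tiny = np.float32(1e-26) * np.float32(1e-26)
print tiny                     # 0.0
print np.float32(1.0) / tiny   # inf (numpy warns about division by zero)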
# Therefore, I decided to replace all infinity values with the minimum value of the Location Quotient. Yes, this assigns a higher LQ to those unpopular businesses, but in clusters with more than four businesses each, such rarely visited businesses never enter the later calculations anyway.

# In[4]:

# replace every inf with the smallest finite LQ, in one vectorized step
df.loc[df.LQ == np.inf, 'LQ'] = df.LQ[df.LQ != np.inf].min()

# Let us look at an individual cluster.

# In[9]:

lqCluster = df[df.cluster == 1]
plt.figure(figsize=(10, 10), dpi=100)
df_scatter = plt.scatter(lqCluster['longitude'], lqCluster['latitude'],
                         c='b', alpha=.5, s=lqCluster['LQ']*10)
plt.title('LQ in cluster', fontsize=20)
plt.xlabel('Longitude', fontsize=18)
plt.ylabel('Latitude', fontsize=18)
plt.xlim(lqCluster.longitude.min()-0.002, lqCluster.longitude.max()+0.002)
plt.ylim(lqCluster.latitude.min()-0.002, lqCluster.latitude.max()+0.002)
plt.show()
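# Since individual clusters get inspected repeatedly below, it is handy to wrap the cell above in a small helper. This is a convenience sketch of mine, not part of the original analysis; it only parametrizes the same plot by cluster number.

# In[ ]:

# Convenience sketch: the scatter plot above, parametrized by cluster id.
def plotClusterLQ(df, clusterId, pad=0.002):
    lqc = df[df.cluster == clusterId]
    plt.figure(figsize=(10, 10), dpi=100)
    plt.scatter(lqc['longitude'], lqc['latitude'], c='b', alpha=.5, s=lqc['LQ']*10)
    plt.title('LQ in cluster ' + str(clusterId), fontsize=20)
    plt.xlabel('Longitude', fontsize=18)
    plt.ylabel('Latitude', fontsize=18)
    plt.xlim(lqc.longitude.min()-pad, lqc.longitude.max()+pad)
    plt.ylim(lqc.latitude.min()-pad, lqc.latitude.max()+pad)
    plt.show()

plotClusterLQ(df, 1)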
# ## Determining the environment
#
# Now it is time to determine which businesses carry the economic activity in each cluster.
# I also wish to see how those categories relate to the most common category of the cluster.
# Since I make multiple comparisons, the significance levels need to be corrected. I used a method called:
#
# ## Šidák correction
#
# It is a simple method to control the familywise error rate. It is probabilistically exact when the individual tests are independent of each other, and conservative otherwise.
# If the test statistics are independent of each other, then testing each of the $m$ hypotheses at level
#
# $$\alpha_{SID} = 1-(1-\alpha)^\frac{1}{m}$$
#
# is Šidák's multiple testing procedure.
# This test is more powerful than Bonferroni, but the gain is small: for $\alpha = 0.05$ and $m = 10$ and $m = 10^{12}$, Bonferroni gives $0.005$ and $5 \cdot 10^{-14}$, while Šidák gives $0.005116$ and $5.129 \cdot 10^{-14}$, respectively. The main merit of the correction is that it is probabilistically exact when the tests are independent of each other; Bonferroni is an easier, approximate way to calculate the Šidák correction.
#
# The Šidák correction is derived by assuming that the individual tests are independent. Let the significance threshold for each test be $\alpha_1$; then the probability that at least one of the tests is significant under this threshold is (1 − the probability that none of them is significant). Since the tests are assumed independent, the probability that none of them is significant is the product of the probabilities that each of them is not significant, so the probability that at least one is significant is $1 - (1 - \alpha_1)^n$. Our intention is for this probability to equal $\alpha$, the significance level for the entire series of tests. Solving for $\alpha_1$, we obtain $\alpha_1 = 1 - (1 - \alpha)^{1/n}$.

# ### Helper functions

# In[7]:

def getTopCategories(newcat, listCat, pomDF):
    # collect every category carried by the businesses that have the category `newcat`
    busi = pomDF.name.ix[pomDF.categories == newcat].values
    if len(busi) > 0:
        for b in busi:
            rcat = pomDF.categories.ix[pomDF.name == b].values
            if len(rcat) > 0:
                for t in rcat:
                    if t not in listCat:
                        listCat.append(t)
    return listCat

# In[8]:

import operator

def SortingBusinessCategories(categ, pomDF):
    # count the businesses per category and sort the categories by that count, descending
    busR = {}
    for rc in categ:
        rcat = rc.flatten().tolist()[0]
        busR.update({rcat: pomDF.name[pomDF.categories == rcat].count()})
    sortedBus = sorted(busR.items(), key=operator.itemgetter(1), reverse=True)
    return sortedBus

# In[9]:

def getCategoryStats(pomDF, categ):
    # for the given categories: the largest LQ, the category it belongs to,
    # and how many businesses share that category
    RcatList = arrayToList(categ)
    pom2 = pomDF.loc[pomDF['categories'].isin(RcatList)]
    Large = pom2.LQ.max()
    Bus = pom2.name.ix[pom2.LQ == Large].values[0]
    BusCat = pom2.categories.ix[(pom2.name == Bus) & (pom2.LQ == Large)].values[0]
    CatNum = pom2.name[pom2.categories == BusCat].count()
    return [Large, BusCat, CatNum]

# In[10]:

def arrayToList(array):
    # flatten an array of single-element arrays into a plain list
    newlist = []
    for r in array:
        if type(r) == np.ndarray:
            newlist.append(r[0])
    return newlist
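# To make the helpers concrete, here is a quick check on a tiny made-up DataFrame (toy values, not the Yelp data).

# In[ ]:

# Toy check of the helpers (made-up values, not Yelp data).
toy = pd.DataFrame({'name':       ['A', 'A', 'B', 'C'],
                    'categories': ['Coffee & Tea', 'Bakeries', 'Coffee & Tea', 'Taxis'],
                    'LQ':         [1.2, 0.8, 2.5, 0.3]})

# every category carried by a business that also has 'Coffee & Tea':
print getTopCategories('Coffee & Tea', [], toy)   # ['Coffee & Tea', 'Bakeries']

# max LQ among the given categories, its category, that category's business count:
print getCategoryStats(toy, np.array([['Coffee & Tea'], ['Taxis']]))   # [2.5, 'Coffee & Tea', 2]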
# ## Performing analysis for some other business in the cluster
#
# However, I cannot answer the question of where to put a stand-alone business, not with Yelp data alone. To answer that question, I would need an additional data source.
# The question I can answer is whether some businesses play well together.
# So now I'll concentrate on businesses that cluster around a top business.
# I'll pick an experimental category and run with it.
# You can see other examples I tried in the comments below. I had to give up on those, mostly because there were no more than five different clusters where that business appeared. In such a case, I would recommend basing the decision not on this analysis but on different factors and different data.

# In[13]:

#print df.categories.unique()
targetCategory = "Coffee & Tea"  # "Bookstores" # "Taxis" # "Rugs"
if targetCategory in df.categories.unique():
    print 'yeah'

# OK, so there is something connected with coffee.
# As a next step, let us see how many clusters have this category.

# In[14]:

pom = df[df['categories'] == targetCategory]
pom2 = pom.cluster.unique().tolist()
#print pom2
clustersDF = df.loc[df['cluster'].isin(pom2)]
targetCategoryClusters = clustersDF.cluster.unique()
print len(targetCategoryClusters)
#print clustersDF.cluster.unique()

# Neat, over 100 clusters. The next step is to determine the top four businesses in each cluster and get the LQ of the targeted category for each cluster.

# In[15]:

df3 = pd.DataFrame(df.cluster.unique())
df3['BusNum'] = 0
df3['topBusCat'] = ''
df3['topCatNum'] = 0
df3['LQmax'] = 0
df3['cat1'] = ''
df3['LQ2'] = 0
df3['cat1num'] = 0
df3['cat2'] = ''
df3['LQ3'] = 0
df3['cat2num'] = 0
df3['cat3'] = ''
df3['LQ4'] = 0
df3['cat3num'] = 0
df3['targetLQ'] = 0
df3 = df3.rename(columns={0: 'cluster'})

# In[16]:

for c in clustersDF.cluster.unique():
    pom = clustersDF[clustersDF.cluster == c]
    # the business with the largest LQ defines the top category of the cluster
    large = pom.LQ.max()
    topBus = pom.name.ix[pom.LQ == large].values[0]
    topBusCat = pom.categories.ix[(pom.name == topBus) & (pom.LQ == large)].values[0]
    df3.topBusCat.loc[df3.cluster == c] = topBusCat
    df3.LQmax.loc[df3.cluster == c] = large
    bNum = pom.name.unique()
    df3.BusNum.loc[df3.cluster == c] = len(bNum)
    topCatStart = []
    topCat = getTopCategories(topBusCat, topCatStart, pom)
    topCatNum = pom.name[pom.categories == topBusCat].count()
    df3.topCatNum.loc[df3.cluster == c] = topCatNum
    df3.targetLQ.loc[df3.cluster == c] = pom.LQ[(pom.categories == targetCategory) & (pom.name != topBus)].max()
    # peel off the categories already covered and pick the next-best one, three times over
    cat = pom.categories.unique()
    Rcat = cat[np.argwhere(np.in1d(cat, np.intersect1d(cat, topCat)) == False)]
    if len(Rcat) > 0:
        ans = getCategoryStats(pom, Rcat)
        df3.cat1.loc[df3.cluster == c] = ans[1]
        df3.cat1num.loc[df3.cluster == c] = ans[2]
        df3.LQ2.loc[df3.cluster == c] = ans[0]
        topCate = getTopCategories(ans[1], topCat, pom)
        Rcat1 = cat[np.argwhere(np.in1d(cat, np.intersect1d(cat, topCate)) == False)]
        if len(Rcat1) > 0:
            ans2 = getCategoryStats(pom, Rcat1)
            df3.cat2.loc[df3.cluster == c] = ans2[1]
            df3.cat2num.loc[df3.cluster == c] = ans2[2]
            df3.LQ3.loc[df3.cluster == c] = ans2[0]
            topCateg = getTopCategories(ans2[1], topCate, pom)
            Rcat2 = cat[np.argwhere(np.in1d(cat, np.intersect1d(cat, topCateg)) == False)]
            if len(Rcat2) > 0:
                ans3 = getCategoryStats(pom, Rcat2)
                df3.cat3.loc[df3.cluster == c] = ans3[1]
                df3.cat3num.loc[df3.cluster == c] = ans3[2]
                df3.LQ4.loc[df3.cluster == c] = ans3[0]

df3.head(5)

# Let us determine whether our category is among the four here. If it is, then we can proceed with the analysis.

# In[17]:

allCatIndf3 = df3.topBusCat.unique().tolist() + df3.cat1.unique().tolist() + df3.cat2.unique().tolist() + df3.cat3.unique().tolist()
uniqCatDF3 = set(allCatIndf3)
if targetCategory in uniqCatDF3:
    print 'yeah'

# The goal is to find businesses that perform well. When a business is among the four most popular in its cluster, it is more probable that I will find a positive influence in that cluster. Taking all clusters that contain the targeted business into consideration would drown the signal in the noise.
# In[18]:

# for each popularity slot, if the target category occupies that slot somewhere,
# record the top category of the first cluster where it does
par = []
for c in df3.topBusCat.unique():
    if c == targetCategory:
        par.append(df3.topBusCat[df3['topBusCat'] == c].tolist()[0])
for c in df3.cat1.unique():
    if c == targetCategory:
        par.append(df3.topBusCat[df3['cat1'] == c].tolist()[0])
for c in df3.cat2.unique():
    if c == targetCategory:
        par.append(df3.topBusCat[df3['cat2'] == c].tolist()[0])
for c in df3.cat3.unique():
    if c == targetCategory:
        par.append(df3.topBusCat[df3['cat3'] == c].tolist()[0])
print par

# Now, with these categories, we'll find the clusters that contain both the target category and one of the listed categories, and test with the Spearman correlation how they influence coffee shops.
#
# ## Spearman's rank correlation coefficient
#
# Often denoted by the Greek letter $\rho$ (rho) or as $r_s$, it is a nonparametric measure of statistical dependence between two variables. It assesses how well the relationship between two variables can be described using a monotonic function. If there are no repeated data values, a perfect Spearman correlation of +1 or −1 occurs when each of the variables is a perfect monotone function of the other.
#
# Spearman's coefficient, like any correlation calculation, is appropriate for both continuous and discrete variables, including ordinal variables. Spearman's $\rho$ and Kendall's $\tau$ can be formulated as special cases of a more general correlation coefficient.
#
# The Spearman correlation coefficient is defined as the Pearson correlation coefficient between the ranked variables. For a sample of size $n$, the $n$ raw scores $X_i$, $Y_i$ are converted to ranks $x_i$, $y_i$, and $\rho$ is computed from:
#
# $$\rho = 1 - \frac{6 \sum d_i^2}{n(n^2 - 1)}$$
#
# where $d_i = x_i - y_i$ is the difference between ranks.
#
# And for those who do not know:
#
# ## Pearson product-moment correlation coefficient
#
# The Pearson product-moment correlation coefficient (sometimes referred to as the PPMCC or PCC, or Pearson's r) is a measure of the linear correlation (dependence) between two variables X and Y, giving a value between +1 and −1 inclusive, where 1 is total positive correlation, 0 is no correlation, and −1 is total negative correlation. It is widely used in the sciences as a measure of the degree of linear dependence between two variables.
# Pearson's correlation coefficient is the covariance of the two variables divided by the product of their standard deviations. The form of the definition involves a "product moment", that is, the mean (the first moment about the origin) of the product of the mean-adjusted random variables; hence the modifier product-moment in the name.
# When applied to a population, Pearson's correlation coefficient is commonly represented by the Greek letter $\rho$ (rho) and may be referred to as the population correlation coefficient or the population Pearson correlation coefficient. The formula is:
#
# $$\rho_{X,Y}= \frac{\operatorname{cov}(X,Y)}{\sigma_X \sigma_Y}$$
#
# where $\operatorname{cov}$ is the covariance and $\sigma_X$ is the standard deviation of $X$.

# In[19]:

# results table: one row per tested category pair
resul = pd.DataFrame(range(100))
resul['category'] = ''
resul['Spearman'] = 0.
resul['P'] = 0.
resul['sidak'] = 0.
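# Before running the real loop, here is a toy run of the same test (made-up numbers, not Yelp data). It shows the two pieces used below: scipy's `spearmanr` and the Šidák adjustment from the formula above.

# In[ ]:

# Toy run of the test performed in the next cell (made-up numbers).
from scipy.stats import spearmanr

lq_other  = [1.2, 0.5, 2.0, 1.1, 0.7, 1.9]   # per-cluster LQ of a companion category
lq_target = [1.0, 0.4, 2.2, 1.3, 0.6, 1.7]   # per-cluster LQ of the target category

rho, p = spearmanr(lq_other, lq_target)
m = 4                                        # pretend we test 4 category pairs; below m = len(par)
p_sidak = 1. - (1. - p)**m                   # familywise-adjusted p-value
print rho, p, p_sidak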
# In[20]:

from scipy.stats import spearmanr

i = 0
for ca in par:
    tarClustDF = clustersDF[clustersDF['categories'] == ca]
    pomList = tarClustDF.cluster.unique().tolist()
    clustersDF2 = clustersDF.loc[clustersDF['cluster'].isin(pomList)]
    if len(tarClustDF) > 5:
        para = []
        resu = []
        for c in pomList:
            # per cluster: max LQ of the companion category and of the target category
            para.append(clustersDF2.LQ[(clustersDF2['categories'] == ca) & (clustersDF2['cluster'] == c)].max())
            resu.append(clustersDF2.LQ[(clustersDF2['categories'] == targetCategory) & (clustersDF2['cluster'] == c)].max())
        cou = len(par)
        spear = spearmanr(para, resu)
        sida = 1. - (1. - spear[1])**cou      # Sidak-adjusted p-value for cou comparisons
        resul.loc[i, 'category'] = ca
        resul.loc[i, 'Spearman'] = spear[0]
        resul.loc[i, 'P'] = spear[1]
        resul.loc[i, 'sidak'] = sida
        i += 1
        print ca, '->', spear[0], 'p=', spear[1], 'Sidak correction:', sida

# In[21]:

# keep only the filled rows that pass the adjusted significance threshold
result = resul[(resul['category'] != '') & (resul['sidak'] < 0.1)]

# ### Validation
#
# Using a random pick from the same data:

# In[106]:

import random

# two independent random samples of LQ values should show no correlation
category = random.sample(df.LQ, 15)
LQrandom = random.sample(df.LQ, 15)
spear = spearmanr(category, LQrandom)
print spear[0], spear[1]

# This shows there is no correlation between randomly chosen samples: the correlation is close to 0, and the p-value is way too high.

# ## Let's make a map
#
# First we need to sort out which Spearman coefficients are significant (absolute value larger than 0.5), and then sort out which ones are negative and which ones are positive.

# In[22]:

negCat = result.category[result['Spearman'] < -0.5].tolist()
posCat = result.category[result['Spearman'] > 0.5].tolist()

# Then we have to get the clusters that contain a positive-influence business and do not contain a negative-influence business.
# In case one of the two category lists is empty, it is skipped. The remaining clusters are treated as neutral, meaning that there the personal performance of a business, together with factors unaccounted for here, will have the greatest influence.

# In[23]:

allClust = df.cluster.unique()
goodList = []
neutralList = []
if len(posCat) > 0:
    goodf = df.loc[df['categories'].isin(posCat)]
    good = goodf.cluster.unique()
    # clusters that contain at least one positive-influence category
    withGood = allClust[np.argwhere(np.in1d(allClust, np.intersect1d(allClust, good)) == True)]
    goodList = arrayToList(withGood)
if len(negCat) > 0:
    badf = df.loc[df['categories'].isin(negCat)]
    bad = badf.cluster.unique()
    # clusters that contain no negative-influence category
    withoutBad = allClust[np.argwhere(np.in1d(allClust, np.intersect1d(allClust, bad)) == False)]
    neutralList = arrayToList(withoutBad)

# Now we get the coordinates of the good and neutral clusters.

# In[24]:

if (len(goodList) == 0) and (len(neutralList) == 0):
    print 'No good places for this business. Try it as a stand-alone business.'
if len(goodList) > 0:
    coordGood = pd.DataFrame(goodList)
    coordGood = coordGood.rename(columns={0: 'cluster'})
    coordGood['lati'] = 0
    coordGood['longi'] = 0
    coordGood['ratioNbus'] = 0
    coordGood['LQmean'] = 0
    coordGood['scale'] = 0
if len(neutralList) > 1:
    coordNeutral = pd.DataFrame(neutralList)
    coordNeutral = coordNeutral.rename(columns={0: 'cluster'})
    coordNeutral['lati'] = 0
    coordNeutral['longi'] = 0
    coordNeutral['ratioNbus'] = 0
    coordNeutral['LQmean'] = 0
    coordNeutral['scale'] = 0

# In[25]:

from __future__ import division

if len(goodList) > 0:
    for c in goodList:
        pom4 = df[df.cluster == c]
        NumBus = len(pom4.name.unique())
        # share and mean LQ of the target category in this cluster
        NumGoodBus = pom4.name[pom4.categories == targetCategory].count()
        ratio = NumGoodBus / NumBus
        meanLQ = pom4.LQ[pom4.categories == targetCategory].mean()
        lati = pom4.latitude.mean()
        longi = pom4.longitude.mean()
        scal = ratio * meanLQ
        coordGood.lati.loc[coordGood.cluster == c] = lati
        coordGood.longi.loc[coordGood.cluster == c] = longi
        coordGood.ratioNbus.loc[coordGood.cluster == c] = ratio
        coordGood.LQmean.loc[coordGood.cluster == c] = meanLQ
        coordGood.scale.loc[coordGood.cluster == c] = scal
if len(neutralList) > 1:
    for c in neutralList:
        pom4 = df[df.cluster == c]
        NumBus = len(pom4.name.unique())
        NumGoodBus = pom4.name[pom4.categories == targetCategory].count()
        ratio = NumGoodBus / NumBus
        meanLQ = pom4.LQ[pom4.categories == targetCategory].mean()
        lati = pom4.latitude.mean()
        longi = pom4.longitude.mean()
        scal = ratio * meanLQ
        coordNeutral.lati.loc[coordNeutral.cluster == c] = lati
        coordNeutral.longi.loc[coordNeutral.cluster == c] = longi
        coordNeutral.ratioNbus.loc[coordNeutral.cluster == c] = ratio
        coordNeutral.LQmean.loc[coordNeutral.cluster == c] = meanLQ
        coordNeutral.scale.loc[coordNeutral.cluster == c] = scal

# Now we can plot the result. Let us first check the sanity of our coordinates.

# In[26]:

print goodList, neutralList

# In[27]:

def intersect(a, b):
    return list(set(a) & set(b))

# In[28]:

figsize(15, 3)
if len(goodList) > 0:
    tornTupleG = coordGood.cluster.tolist()
    latsG = coordGood.lati.tolist()
    lonsG = coordGood.longi.tolist()
    #scalesG = coordGood.scale.tolist()
    subplot(141)
    title("Distribution of good Latitudes")
    hist(latsG, bins=20)
    subplot(142)
    title("Distribution of good Longitudes")
    hist(lonsG, bins=20)
if len(neutralList) > 0:
    tornTupleN = coordNeutral.cluster.tolist()
    latsN = coordNeutral.lati.tolist()
    lonsN = coordNeutral.longi.tolist()
    #scalesN = coordNeutral.scale.tolist()
    subplot(143)
    title("Distribution of neutral Latitudes")
    hist(latsN, bins=20)
    subplot(144)
    title("Distribution of neutral Longitudes")
    hist(lonsN, bins=20)
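# As an aside, the np.in1d/np.argwhere selection in the goodList/neutralList cell above can be written more directly with plain Python sets, in the spirit of the intersect helper. A minimal equivalent sketch, as a cross-check:

# In[ ]:

# Set-based equivalent of the goodList/neutralList selection (same semantics).
allClustSet = set(df.cluster.unique())
goodSet = set(df.cluster[df['categories'].isin(posCat)].unique()) if len(posCat) > 0 else set()
badSet = set(df.cluster[df['categories'].isin(negCat)].unique()) if len(negCat) > 0 else set()

goodList2 = sorted(allClustSet & goodSet)    # clusters containing a positive-influence category
neutralList2 = sorted(allClustSet - badSet)  # clusters free of any negative-influence category
print len(goodList2), len(neutralList2)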
# Let us plot the result using Folium.

# In[29]:

from IPython.display import HTML
import folium

def inline_map(map):
    """
    Embeds the HTML source of the map directly into the IPython notebook.

    This method will not work if the map depends on any files (json data).
    Also this uses the HTML5 srcdoc attribute, which may not be supported
    in all browsers.
    """
    map._build_map()
    return HTML('<iframe srcdoc="{srcdoc}" style="width: 100%; height: 510px; border: none"></iframe>'.format(srcdoc=map.HTML.replace('"', '&quot;')))

# In[30]:

meanlat = df.latitude.mean()
meanlong = df.longitude.mean()
map = folium.Map(width=600, height=600, location=[meanlat, meanlong], zoom_start=10)
if len(goodList) > 0:
    for i in range(len(tornTupleG)):
        map.simple_marker([latsG[i], lonsG[i]], popup=str(tornTupleG[i]) + ' Recommended',
                          marker_color='green', marker_icon='ok-sign')
if len(neutralList) > 0:
    for i in range(len(tornTupleN)):
        map.simple_marker([latsN[i], lonsN[i]], popup=str(tornTupleN[i]) + ' Neutral',
                          marker_color='blue', marker_icon='ok-sign')
inline_map(map)

# In[34]:

# inspect one cluster by hand
i = 92
hpom = df[df.cluster == i]
large = hpom.LQ.max()
topBus = hpom.name.ix[hpom.LQ == large].values[0]
topBusCat = hpom.categories.ix[(hpom.name == topBus) & (hpom.LQ == large)].values[0]
bus = hpom.categories.unique()
print i, topBusCat
print bus

# In[ ]: