# Montreal cyclist / weather analysis (from Julia Evans' PyCon pandas talk).
#
# This file is a cleaned-up port of a Python 2 Jupyter-notebook transcript:
# the `print` statements, `%%timeit` / `%matplotlib inline` cell magics and
# long-removed pandas options have been translated to plain Python 3 so the
# script can run outside a notebook.

import time

import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt

JULIA = {
    'email': 'julia@jvns.ca',
    'twitter': 'http://twitter.com/b0rk',
    'slides': 'http://bit.ly/pycon-pandas',
    'website': 'http://jvns.ca',
}

# Environment Canada bulk-data endpoint.
# mctavish station: 10761, airport station: 5415
URL_TEMPLATE = (
    "http://climate.weather.gc.ca/climateData/bulkdata_e.html"
    "?format=csv&stationID=5415&Year={year}&Month={month}"
    "&timeframe=1&submit=Download+Data"
)


def configure_display():
    """Set pandas and matplotlib display options (notebook prettification)."""
    pd.set_option('display.max_columns', 15)
    # 'display.line_width' was renamed 'display.width' in pandas 0.11.
    pd.set_option('display.width', 400)
    # 'display.mpl_style' was removed from pandas; use a matplotlib style.
    plt.style.use('default')
    # Make the graphs and fonts bigger.
    matplotlib.rc('figure', figsize=(14, 7))
    matplotlib.rc('font', family='normal', weight='bold', size=22)


def demo_numpy_speed(n=20000000):
    """Time a pure-Python sum of squares against the NumPy equivalent.

    The notebook used ``%%timeit`` cell magic; outside IPython we time a
    single run of each with ``time.perf_counter`` instead.
    """
    numpy_array = np.arange(n)

    start = time.perf_counter()
    total = 0
    for x in range(n):
        # Fixed: the original wrote `x += total * total`, which accumulated
        # nothing, so the comparison with NumPy was meaningless.
        total += x * x
    print('pure Python sum of squares: %.3fs' % (time.perf_counter() - start))

    start = time.perf_counter()
    np.sum(numpy_array * numpy_array)
    print('NumPy sum of squares:       %.3fs' % (time.perf_counter() - start))


def get_weather_data(year):
    """Download hourly weather for *year*, one month at a time.

    Returns one DataFrame indexed by 'Date/Time', with empty columns and
    rows containing missing values dropped.
    """
    data_by_month = []
    for month in range(1, 13):
        url = URL_TEMPLATE.format(year=year, month=month)
        weather_data = pd.read_csv(url, skiprows=16,
                                   index_col='Date/Time',
                                   parse_dates=True).dropna(axis=1)
        # Strip the degree sign (U+00B0) from headers such as 'Temp (°C)'.
        # (A list, not a Py3 `map` object, so pandas gets a concrete index.)
        weather_data.columns = [col.replace('\xb0', '')
                                for col in weather_data.columns]
        weather_data = weather_data.drop(
            ['Year', 'Day', 'Month', 'Time', 'Data Quality'], axis=1)
        data_by_month.append(weather_data.dropna())
    # Concatenate and drop any columns empty across the whole year.
    return pd.concat(data_by_month).dropna(axis=1, how='all').dropna()


def main():
    configure_display()

    print('Email:', JULIA['email'])
    print('Twitter:', JULIA['twitter'])
    print('Blog:', JULIA['website'])

    demo_numpy_speed()

    # The notebook first read the file without options just to peek at it;
    # go straight to the properly-parsed read here.
    bike_data = pd.read_csv("./2012.csv", encoding='latin1', sep=';',
                            index_col='Date', parse_dates=True, dayfirst=True)
    # Get rid of missing columns
    bike_data = bike_data.dropna(axis=1)
    # Only use 3 of the columns so it all fits on the screen
    bike_data = bike_data[['Berri 1', 'Côte-Sainte-Catherine',
                           'Maisonneuve 1']]
    print(bike_data[:5])

    bike_data.plot()
    print(bike_data.median())
    bike_data.median().plot(kind='bar')

    # column slice, then row slice
    column_slice = bike_data[['Berri 1', 'Maisonneuve 1']]
    print(column_slice[:3])

    # boolean filtering
    print(bike_data[bike_data['Berri 1'] < 75])

    # 0 = Monday ... 6 = Sunday
    bike_data['weekday'] = bike_data.index.weekday
    counts_by_day = bike_data.groupby('weekday').aggregate(np.sum)
    counts_by_day.index = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                           'Friday', 'Saturday', 'Sunday']
    print(counts_by_day)
    counts_by_day['Berri 1'].plot(kind='bar')
    bike_data['Berri 1'].plot()

    # Join daily-mean weather onto the bike counts.
    # `.resample('D', how='mean')` was removed in pandas 0.18 -- the modern
    # spelling is `.resample('D').mean()`.
    weather_data = get_weather_data(2012)
    print(weather_data[:5])
    bike_data['mean temp'] = weather_data['Temp (C)'].resample('D').mean()
    bike_data[['Berri 1', 'mean temp']].plot(subplots=True)

    # Fraction of each day's hourly observations that mention rain.
    bike_data['Rain'] = (weather_data['Weather'].str.contains('Rain')
                         .resample('D').mean())
    bike_data[['Berri 1', 'Rain']].plot(subplots=True)

    print('Email:', JULIA['email'])
    print('Twitter:', JULIA['twitter'])
    print('Blog:', JULIA['website'])
    print('Slides:', JULIA['slides'])

    plt.show()


if __name__ == "__main__":
    main()