## Accelerating Python code with Numba

In [1]:
import math
import random
import numpy as np
from numba import jit, vectorize, float64
import matplotlib.pyplot as plt
import seaborn
%matplotlib inline


### Random walk

In [2]:
def step():
    """Return one random-walk increment: +1.0 or -1.0 with equal probability."""
    if random.random() > .5:
        return 1.
    return -1.

In [3]:
def walk(n):
    """Simulate a capped random walk of length n.

    Each move adds dx = 1/n times a random +/-1 increment from step();
    whenever the next position would exceed 5e-3 it is reset to 0, so
    the walk stays bounded. Returns a length-n float array of positions
    (positions[0] == 0).
    """
    positions = np.zeros(n)
    dx = 1. / n
    for i in range(n - 1):
        candidate = positions[i] + dx * step()
        positions[i + 1] = 0. if candidate > 5e-3 else candidate
    return positions

In [4]:
# Number of steps in the walk; run the pure-Python version once as a baseline.
n = 100000
x = walk(n)

In [5]:
# Label the figure so it stands alone; the trailing ';' suppresses the
# noisy `[<Line2D ...>]` Out[] repr in the notebook.
plt.plot(x)
plt.xlabel("step index")
plt.ylabel("position")
plt.title("Capped random walk");

In [6]:
%%timeit
walk(n)

Out[6]:
10 loops, best of 3: 57.6 ms per loop
In [7]:
@jit(nopython=True)
def step_numba():
    """JIT-compiled random increment: +1.0 or -1.0 with equal probability."""
    if random.random() > .5:
        return 1.
    return -1.

In [8]:
@jit(nopython=True)
def walk_numba(n):
    """Numba nopython-compiled capped random walk of length n.

    Same logic as the pure-Python walk(): each move adds dx = 1/n times
    a random +/-1 increment from step_numba(), and any position above
    5e-3 is reset to 0. Returns a length-n float array of positions.
    """
    out = np.zeros(n)
    dx = 1. / n
    for i in range(n - 1):
        nxt = out[i] + dx * step_numba()
        if nxt > 5e-3:
            out[i + 1] = 0.
        else:
            out[i + 1] = nxt
    return out

In [9]:
%%timeit
walk_numba(n)

Out[9]:
The slowest run took 81.94 times longer than the fastest. This could mean that an intermediate result is being cached
1000 loops, best of 3: 1.89 ms per loop

### Universal functions

In [10]:
# 10 million uniform samples; time the plain NumPy expression as the baseline.
x = np.random.rand(10000000)
%timeit np.cos(2*x**2 + 3*x + 4*np.exp(x**3))

Out[10]:
1 loops, best of 3: 689 ms per loop
In [11]:
@vectorize([float64(float64)])
def kernel(x):
    """Scalar kernel compiled into a NumPy ufunc by Numba.

    Computes cos(2*x**2 + 3*x + 4*exp(x**3)) elementwise. The explicit
    float64 signature (imported at the top of the notebook but previously
    unused) triggers eager nopython compilation instead of lazy,
    call-time dispatch.
    """
    return np.cos(2*x**2 + 3*x + 4*np.exp(x**3))

In [12]:
kernel(1.)  # sanity-check the ufunc on a scalar input

Out[12]:
-0.98639139715432589
In [13]:
%timeit kernel(x)

Out[13]:
1 loops, best of 3: 324 ms per loop
In [14]:
import numexpr
%timeit numexpr.evaluate('cos(2*x**2 + 3*x + 4*exp(x**3))')

Out[14]:
10 loops, best of 3: 122 ms per loop
In [15]:
numexpr.detect_number_of_cores()  # how many cores numexpr detected on this machine

Out[15]:
4