# Build a 600x600 matrix of uniform [0, 1) samples, stored column-major
# (Fortran order) so the column slices taken below are contiguous in memory.
mat = numpy.asfortranarray(numpy.random.random_sample((600, 600)))
# Legacy IPython.parallel API (IPython <= 3.x; the package is now ipyparallel).
from IPython.parallel import Client, require, interactive
# Connect to the controller of an already-running cluster.
rc = Client()
# DirectView: addresses all engines explicitly; map() partitions work evenly.
dv = rc.direct_view()
# LoadBalancedView: a dynamic scheduler that hands tasks to idle engines.
lv = rc.load_balanced_view()
@require("numpy")
@interactive
def simple_inner(i):
    """Sum the inner products of column ``i`` of ``mat`` with every later column."""
    col = mat[:, i]
    n_cols = mat.shape[1]
    # a list comprehension (rather than a generator expression) is required
    # here so the function serializes cleanly when shipped to the engines
    return sum([numpy.inner(col, mat[:, j]) for j in xrange(i + 1, n_cols)])
Local, serial performance.
%timeit sum(simple_inner(i) for i in xrange(mat.shape[1] - 1))
1 loops, best of 3: 720 ms per loop
# Broadcast the matrix into every engine's namespace, waiting until all
# engines have received it; the trailing semicolon suppresses the cell's
# Out[] display in the notebook.
dv.push(dict(mat=mat), block=True);
Parallel implementation using a DirectView.
%timeit sum(dv.map(simple_inner, range(mat.shape[1] - 1), block=False))
1 loops, best of 3: 1.52 s per loop
Parallel implementation using a LoadBalancedView with a large chunksize and unordered results.
%timeit sum(lv.map(simple_inner, range(mat.shape[1] - 1), ordered=False, chunksize=(mat.shape[1] - 1) // len(lv), block=False))
1 loops, best of 3: 1.2 s per loop
Using two indices takes even more time due to additional communication.
@require("numpy")
@interactive
def inner(i, j):
    """Inner product of columns ``i`` and ``j`` of the engine-local ``mat``."""
    u = mat[:, i]
    v = mat[:, j]
    return numpy.inner(u, v)
# Enumerate the strictly-upper-triangular index pairs (row < col) of mat as
# two parallel argument lists: first[k], second[k] form the k-th (i, j) pair.
first = [row for row in xrange(mat.shape[1] - 1) for col in xrange(row + 1, mat.shape[1])]
second = [col for row in xrange(mat.shape[1] - 1) for col in xrange(row + 1, mat.shape[1])]
%timeit sum(dv.map(inner, first, second, block=False))
1 loops, best of 3: 2.79 s per loop
%timeit sum(lv.map(inner, first, second, ordered=False, chunksize=len(first) // len(lv), block=False))
1 loops, best of 3: 2.74 s per loop