%pylab inline
from IPython.display import Image
from pymc import *
from numpy import ones, array

# alpha ~ Normal(mu=0, std=5)
alpha = Normal('alpha', mu=0, tau=1./5**2, value=0.)

# an alternative way is to define a function that returns the log-probability
# (this decorated version replaces the alpha defined above):
@stochastic
def alpha(value=0., mu=0., tau=1./5**2):
    return normal_like(value, mu=mu, tau=tau)

# beta ~ U(-inf, inf), an improper flat prior
@stochastic
def beta(value=10.):
    return 0.

# logit(theta_i) = alpha + beta*x_i
# theta_i = invlogit(alpha + beta*x_i)
dust = array([-.86, -.3, -.05, .73])

@deterministic
def theta(a=alpha, b=beta, d=dust):
    """theta = invlogit(a + b*d)"""
    return invlogit(a + b*d)

n = 5*ones(4, dtype=int)
data = array([0, 1, 3, 5], dtype=float)

# unicorns ~ Binomial(n_i, theta_i)
unicorns = Binomial('unicorns', n=n, p=theta, value=data, observed=True)

# the full model again, collected in one place
from pymc import *
from numpy import ones, array

dust = array([-.86, -.3, -.05, .73])
n = 5*ones(4, dtype=int)
data = array([0, 1, 3, 5], dtype=float)

# alpha ~ Normal(mu=0, std=5)
alpha = Normal('alpha', mu=0, tau=1./5**2, value=0.)

# beta ~ U(-inf, inf)
@stochastic
def beta(value=10.):
    return 0.

# theta_i = invlogit(alpha + beta*x_i)
@deterministic
def theta(a=alpha, b=beta, d=dust):
    """theta = invlogit(a + b*d)"""
    return invlogit(a + b*d)

# unicorns ~ Binomial(n_i, theta_i)
unicorns = Binomial('unicorns', n=n, p=theta, value=data, observed=True)

M = MCMC([alpha, beta, theta, unicorns])

# create a graph of the model and save it to a file
graph.graph(M, format='png', path='', name='unicorns_model')

import scipy.misc
plt.figure(figsize=(8, 8))
plt.imshow(scipy.misc.imread('unicorns_model.png'))

print "alpha's parents: ", alpha.parents
print "alpha's children: ", alpha.children

print "alpha's value:", alpha.value
print "beta's value:", beta.value
print "theta's value:", theta.value
print "unicorns' value:", unicorns.value

# changing a parent's value propagates to its deterministic children
beta.value = 11
print "beta's value:", beta.value
print "theta's value:", theta.value

print "alpha's logp: ", alpha.logp
print "model's logp: ", M.logp

# maximum a posteriori estimate
map = MAP([alpha, beta, theta, unicorns])
map.fit()

print "alpha's value:", alpha.value
print "beta's value:", beta.value
print "theta's value:", theta.value
print "unicorns' value:", unicorns.value

# LD50 is the dose at which theta = 0.5: invlogit(alpha + beta*x) = 0.5, so x = -alpha/beta
LD50_using_MAP = -alpha.value / beta.value
print "LD50: ", LD50_using_MAP

Image(filename='what_we_would_get.png')

from IPython.display import YouTubeVideo

M = MCMC([alpha, beta, theta, unicorns])
M.sample(10000, burn=5000)

fig = plt.figure(figsize=(16, 8))
Matplot.plot(alpha, new=False)

M.summary()

# two equivalent ways to access the samples
M.trace('alpha')[:]
alpha.trace()

fig = plt.figure(figsize=(8, 8))
scatter(alpha.trace(), beta.trace()); xlabel('alpha'); ylabel('beta');

plt.figure(figsize=(8, 8))
hist(-alpha.trace()/beta.trace(), 20); xlabel('LD50');

# track LD50 as a deterministic node so it gets its own trace
@deterministic
def ld50(a=alpha, b=beta):
    return -a/b

M2 = MCMC([alpha, beta, theta, unicorns, ld50])
M2.sample(10000, 5000)

fig = plt.figure(figsize=(16, 8))
Matplot.plot(ld50, new=False)

print "LD50 using MAP: ", LD50_using_MAP
ld50.summary()

print "P(LD50 > 0): ", np.sum(ld50.trace() > 0) / float(len(ld50.trace()))

fig = plt.figure(figsize=(16, 8))
Matplot.plot(alpha, new=False)

Image(filename='ham-mcmc.png')

import pymc as pm
import numpy as np
from numpy.random import randn, rand

class SliceStep(pm.Gibbs):
    """Simple slice sampler."""

    def __init__(self, stochastic, width=2, maxiter=1000, verbose=None, *args, **kwargs):
        """
        Input:
          stochastic - stochastic node
          width      - the initial width of the interval
          maxiter    - maximum number of iterations allowed for stepping out and shrinking
        """
        pm.Gibbs.__init__(self, stochastic, verbose=verbose, *args, **kwargs)
        self.width = width
        self.neval = 0
        self.maxiter = maxiter

    def step(self):
        stoch = self.stochastic
        value = stoch.value

        # sample the vertical level
        z = self.logp_plus_loglike - np.random.exponential()

        if self.verbose > 2:
            print self._id + ' current value: %.3f' % value
            print self._id + ' sampled vertical level ' + repr(z)

        # position an interval of width self.width at a random offset around the current value
        r = self.width * np.random.rand()
        xr = value + r
        xl = xr - self.width

        if self.verbose > 2:
            print 'initial interval [%.3f, %.3f]' % (xl, xr)

        # step out to the left
        iter = 0
        stoch.value = xl
        while (self.get_logp() >= z) and (iter < self.maxiter):
            xl -= self.width
            stoch.value = xl
            iter += 1
        assert iter < self.maxiter, "Step-out procedure failed"
        self.neval += iter
        if self.verbose > 2:
            print 'after %d iterations the interval is [%.3f, %.3f]' % (iter, xl, xr)

        # step out to the right
        iter = 0
        stoch.value = xr
        while (self.get_logp() >= z) and (iter < self.maxiter):
            xr += self.width
            stoch.value = xr
            iter += 1
        assert iter < self.maxiter, "Step-out procedure failed"
        self.neval += iter
        if self.verbose > 2:
            print 'after %d iterations the interval is [%.3f, %.3f]' % (iter, xl, xr)

        # draw a new point from the interval [xl, xr]
        xp = rand()*(xr - xl) + xl
        stoch.value = xp

        # if the point falls outside the slice, shrink the interval and draw again
        iter = 0
        while (self.get_logp() < z) and (iter < self.maxiter):
            if xp > value:
                xr = xp
            else:
                xl = xp
            xp = rand()*(xr - xl) + xl  # draw again
            stoch.value = xp
            iter += 1
        assert iter < self.maxiter, "Shrink-in procedure failed."
        self.neval += iter
        if self.verbose > 2:
            print 'after %d iterations found new value: %.3f' % (iter, xp)

    def get_logp(self):
        try:
            return self.logp_plus_loglike
        except pm.ZeroProbability:
            return -np.inf

# set up a new MCMC object
M_slice = MCMC([alpha, beta, theta, ld50])

# assign the slice step method (http://en.wikipedia.org/wiki/Slice_sampling) to every unobserved stochastic
[M_slice.use_step_method(SliceStep, node) for node in M_slice.stochastics];

# run MCMC
M_slice.sample(2000)

print
fig = plt.figure(figsize=(16, 8))
Matplot.plot(alpha, new=False)

Image(filename='comparison.png', width=800)
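# A rough, optional cost check: SliceStep accumulates the number of extra
# log-probability evaluations it performed in self.neval, so the counters can be
# read back after sampling. This sketch assumes PyMC 2 exposes the assigned step
# methods as M_slice.step_methods and labels them with _id (as used inside the
# class above); adjust the attribute names if your version differs.
for sm in M_slice.step_methods:
    if isinstance(sm, SliceStep):
        print sm._id, 'used', sm.neval, 'extra log-probability evaluations'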