#!/usr/bin/env python # coding: utf-8 # In[89]: from __future__ import print_function get_ipython().run_line_magic('matplotlib', 'inline') import matplotlib.pyplot as plt import numpy as np from scipy import constants import seaborn as sns sns.set_style("darkgrid") import scipy # #Table of Contents # * [Generate test data](#Generate-test-data) # * [Fit the test data to a sin function using bayes interference](#Fit-the-test-data-to-a-sin-function-using-bayes-interference) # # # Generate test data # In[90]: alpha = 1.4 f = lambda x : np.sin(alpha * x) # In[91]: x = np.linspace(0, np.pi, 8) x_fine = np.linspace(-.5,np.pi +.5,1000) y = np.sin(1.4*x) # In[92]: #plot sns.plt.plot(x,y, 'o', zorder=2) sns.plt.plot(x_fine, f(x_fine), zorder=1) plt.ylim(-1.1, 1.1) plt.xlim(min(x_fine), x_fine.max()); plt.title('original curve with points'); # For each dot here generate an ensemble of possible measurments. They will be distributed with a simple gaussian first in this example. # In[93]: sigma = .01 measurements = np.array([np.random.normal(mu, scale=sigma, size=15) for mu in y]) # In[94]: #measurements comparison with original curve plt.plot(x, measurements, 'o', c='indianred') plt.plot(x_fine, f(x_fine)) plt.title('Measurements'); plt.xlim(min(x_fine), max(x_fine)); # # Fit the test data to a sin function using bayes interference # In[95]: import pymc as pm # There are several unkowns in this example now. First the two variables $\alpha$ and $\phi$ from the original sin curve and then $\sigma$ the standard deviation of the error for each measurement. 
# In[96]:
# Priors. NOTE(review): this `alpha` shadows the ground-truth constant defined
# above. `tau` is used as a precision below, but a Normal prior has support on
# negative values — a positive-support prior (e.g. Gamma) would be safer;
# kept as-is to preserve the original model. TODO confirm intent.
alpha = pm.Normal('alpha', 3, 1)
tau = pm.Normal('tau', 5, 10)

# In[97]:
# One deterministic node per sample point: the model's predicted sin(alpha*x).
# BUG FIX: the original lambda bound only `alpha` as a default and left `xx`
# free. Python closures are late-binding, so when PyMC evaluated the nodes,
# every obs_i computed sin(alpha * x[-1]) — the last point — instead of its
# own x value. Binding xx=xx as a default freezes the per-point value at
# lambda-creation time.
obs = [pm.CommonDeterministics.Lambda('obs_{}'.format(i),
                                      lambda alpha=alpha, xx=xx: np.sin(alpha * xx))
       for i, xx in enumerate(x)]

# In[98]:
# Observed likelihood: each row of `measurements` is Normal around its
# predicted value with precision tau.
noises = [pm.Normal('noise_{}'.format(i), o, tau, value=m, observed=True)
          for i, (m, o) in enumerate(zip(measurements, obs))]

# In[99]:
var_list = [tau, alpha]
var_list.extend(obs)
var_list.extend(noises)

# In[100]:
model = pm.Model(var_list)

# In[101]:
# MAP fit first to start the chain near the posterior mode.
map_ = pm.MAP(model)
map_.fit()

# In[102]:
mcmc = pm.MCMC(model)
mcmc.sample(100000, 90000)  # 100k samples, 90k burn-in

# In[103]:
pm.Matplot.plot(mcmc.trace("alpha"))

# In[104]:
pm.Matplot.plot(mcmc.trace("tau"))

# ##Look at plot of estimated values

# In[105]:
from scipy.stats.mstats import mquantiles

# In[106]:
alpha_ = mcmc.trace('alpha')[:]
alpha_mean = np.mean(alpha_)
alpha_qs = mquantiles(alpha_, [.025, .975])  # 95% credible interval bounds

# In[107]:
# Plot CI band for the alpha value against the true input curve.
plt.fill_between(x_fine, np.sin(alpha_qs[0] * x_fine),
                 y2=np.sin(alpha_qs[1] * x_fine), color='#7A68A6')
plt.plot(x_fine, np.sin(alpha_qs[0] * x_fine), color='#7A68A6', label='95% CI')
plt.plot(x_fine, np.sin(1.4 * x_fine), label='input', zorder=1, c='indianred')
plt.plot(x, measurements, 'o', zorder=-1, color='blue')
plt.ylim(-1.4, 1.4)
plt.legend(loc='lower right')

# In[108]:
# NOTE(review): tau is a precision, so 1/tau is a *variance*-like quantity;
# using it directly as a std below (and comparing against sigma=.01 input,
# though the 'Input' line uses .2) looks inconsistent — verify intent.
betas = 1 / mcmc.trace('tau')[:]
betas_mean = np.mean(betas)
betas_qs = mquantiles(betas, [.025, .975])

# In[109]:
# Widest and narrowest error distribution for a given sample point.
def plot_error_distri(idx):
    """Plot the inferred noise distributions around point idx vs. its data."""
    mu = np.sin(1.4 * x[idx])
    m = measurements[idx]
    xx = np.linspace(mu - 1, mu + 1, 100)
    plt.plot(xx, scipy.stats.norm.pdf(xx, mu, betas.max()), label='max')
    plt.plot(xx, scipy.stats.norm.pdf(xx, mu, betas.min()), label='min')
    plt.plot(xx, scipy.stats.norm.pdf(xx, mu, betas.mean()), label='mean')
    plt.plot(xx, scipy.stats.norm.pdf(xx, mu, .2), label='Input')
    plt.plot(m, np.ones(len(m)) * .1, 'o')  # the raw measurements, for scale
    plt.xlim(mu - 1, mu + 1)
    plt.legend();
    plt.title('idx = {}'.format(idx))

plt.figure(figsize=(10, 10))
for i in range(len(measurements)):
    plt.subplot(4, 2, i + 1)
    plot_error_distri(i)
plt.tight_layout();