# Phase diagram for the Ramsey model with the stable and unstable manifolds
# overlaid, plus two divergent trajectories started just off the stable arm.
plt.figure(figsize=(8, 6))
ax = model.plot_phase_diagram(20, cmap='winter', arrows=True)[0]

# Unstable manifold: integrate forward from two tiny perturbations of (k*, c*).
traj = model.integrate(t0=0, y0=[k_star - 1e-5, c_star + 1e-5], T=1e3, h=0.5,
                       integrator='lsoda')
ax.plot(traj[:, 1], traj[:, 2], 'r--', label='$M_U$')
traj = model.integrate(t0=0, y0=[k_star + 1e-5, c_star - 1e-5], T=1e3, h=0.5,
                       integrator='lsoda')
ax.plot(traj[:, 1], traj[:, 2], 'r--')

# Stable manifold: reverse shooting from both sides of the steady state.
optimal_traj = model.solve_reverse_shooting(0.1 * k_star, h=1.0, eps=1e-5)
ax.plot(optimal_traj[:, 1], optimal_traj[:, 2], 'r', label='$M_S$')
optimal_traj_2 = model.solve_reverse_shooting(6.0 * k_star, h=1.0, eps=1e-5)
ax.plot(optimal_traj_2[:, 1], optimal_traj_2[:, 2], 'r')

ax.set_title('Phase diagram for the Ramsey (1928) model', fontsize=20)
ax.legend(loc='best', frameon=False, prop={'family':'serif'})

# Two unlabeled trajectories started just inside/outside the stable arm's
# endpoints; both diverge, illustrating the saddle-path property.
traj = model.integrate(t0=0, y0=0.999 * optimal_traj[-1, 1:], T=1e3, h=0.5,
                       integrator='lsoda')
ax.plot(traj[:, 1], traj[:, 2], color='0.5', label='')
traj = model.integrate(t0=0, y0=1.001 * optimal_traj_2[-1, 1:], T=1e3, h=0.5,
                       integrator='lsoda')
ax.plot(traj[:, 1], traj[:, 2], color='0.5', label='')

#plt.savefig('graphics/ramsey-phase-diagram-with-manifolds.pdf')
plt.show()
# Zoomed-in phase diagram around the steady state with a direction field.
plt.figure(figsize=(8, 6))

# Zoom window: +/- 10% of c* around (k*, c*).
x_min = k_star - 0.1 * c_star
x_max = k_star + 0.1 * c_star
y_min = (1 - 0.1) * c_star
y_max = (1 + 0.1) * c_star

ax = model.plot_phase_diagram(166)[0]
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)

# Direction field (k_dot, c_dot) evaluated on a coarse 10x10 grid.
X, Y = np.meshgrid(np.linspace(x_min, x_max, 10), np.linspace(y_min, y_max, 10))
U = equation_of_motion_capital(0, np.array([X, Y]), model.args)
V = consumption_euler_equation(0, np.array([X, Y]), model.args)
Q = plt.quiver(X, Y, U, V)

# Pad the axis limits by 5% on each side.
# BUG FIX: the original called the bare pylab name `axis()`, which is only
# defined after `%pylab`/a star import; use the explicit plt.axis() instead.
l, r, b, t = plt.axis()
dx, dy = r - l, t - b
ax.axis([l - 0.05 * dx, r + 0.05 * dx, b - 0.05 * dy, t + 0.05 * dy])

plt.title('Minimal arguments, no kwargs')
#plt.savefig('Graphics/ramsey-model-quiver-ploy.pdf')
<matplotlib.text.Text at 0x9c38c30>
c_star
1.3898982978743852
ramsey_analytic_consumption(k_star, 100, model.args)
1.3898982978743852
We can generate an analytic trajectory for our Ramsey model like so...
# Baseline parameterization; the discount rate rho is backed out from the
# other parameters via get_rho.
params = dict(alpha=0.33, delta=0.04, g=0.01, n=0.025, theta=3.0)
params['rho'] = get_rho(params)
params
{'alpha': 0.33, 'delta': 0.04, 'g': 0.01, 'n': 0.025, 'rho': 0.004250000000000004, 'theta': 3.0}
# Instantiate the Ramsey model from its primitives.
model = growth.RamseyModel(output=cobb_douglas_output,
                           k_dot=equation_of_motion_capital,
                           c_dot=consumption_euler_equation,
                           F=ramsey_system,
                           jacobian=ramsey_jacobian,
                           params=params)

# Steady state: attach the closed-form expressions and evaluate them.
model.steady_state.set_functions({'k_star':analytic_k_star, 'c_star':analytic_c_star})
model.steady_state.set_values()

k_star = model.steady_state.values['k_star']
c_star = model.steady_state.values['c_star']

# Eigen-decomposition of the Jacobian evaluated at the steady state.
model.steady_state.set_eigen_star(ramsey_jacobian(0, [k_star, c_star], params))

# Capital grid on which the benchmark policy function is compared.
k_min, k_max = 0.5 * k_star, 2.0 * k_star
plot_grid = np.linspace(k_min, k_max, 1000)
# Accuracy/run-time sweep for forward shooting over tolerance and step size.
# NOTE(review): indentation was lost in this transcript; in the original
# notebook cell the `h` loop and its body are nested inside the `tol` loop.
for tol in [1e-1, 1e-2, 1e-3, 1e-4]:
for h in [1e0, 1e-1, 1e-2]:
# solve the model using forward shooting
kwargs = {'rtol':1e-9}
tmp_M_S = model.solve_forward_shooting(k_min, h, tol, integrator='dopri5', **kwargs)
# compute the analytic policy at the same time pts.
tmp_analytic_M_S = ramsey_analytic_solution(k_min, tmp_M_S[:,0], model.args)
# compute the average approximation error
tmp_L2_error = model.get_L2_errors(tmp_M_S, tmp_analytic_M_S)
print 'L^2 error for tol=%g and h=%g using forward shooting is %g' %(tol, h, tmp_L2_error)
# estimate the run-time
%timeit -n 1 -r 3 model.solve_forward_shooting(k_min, h, tol, integrator='dopri5', **kwargs)
print ''
L^2 error for tol=0.1 and h=1 using forward shooting is 1.21573 1 loops, best of 3: 43.4 ms per loop L^2 error for tol=0.1 and h=0.1 using forward shooting is 3.26052 1 loops, best of 3: 210 ms per loop L^2 error for tol=0.1 and h=0.01 using forward shooting is 10.2344 1 loops, best of 3: 2.08 s per loop L^2 error for tol=0.01 and h=1 using forward shooting is 0.439963 1 loops, best of 3: 118 ms per loop L^2 error for tol=0.01 and h=0.1 using forward shooting is 1.33427 1 loops, best of 3: 783 ms per loop L^2 error for tol=0.01 and h=0.01 using forward shooting is 4.19214 1 loops, best of 3: 7.69 s per loop L^2 error for tol=0.001 and h=1 using forward shooting is 0.683125 1 loops, best of 3: 129 ms per loop L^2 error for tol=0.001 and h=0.1 using forward shooting is 2.01904 1 loops, best of 3: 804 ms per loop L^2 error for tol=0.001 and h=0.01 using forward shooting is 6.36917 1 loops, best of 3: 8.27 s per loop L^2 error for tol=0.0001 and h=1 using forward shooting is 0.683125 1 loops, best of 3: 134 ms per loop L^2 error for tol=0.0001 and h=0.1 using forward shooting is 1.34184 1 loops, best of 3: 2.64 s per loop L^2 error for tol=0.0001 and h=0.01 using forward shooting is 4.22663 1 loops, best of 3: 27.4 s per loop
for tol in [1e-5, 1e-6, 1e-7, 1e-8, 1e-9]:
# solve the model using forward shooting
kwargs = {'rtol':1e-9}
tmp_M_S = model.solve_forward_shooting(k_min, 1.0, tol, integrator='dopri5', **kwargs)
# compute the analytic policy at the same time pts.
tmp_analytic_M_S = ramsey_analytic_solution(k_min, tmp_M_S[:,0], model.args)
# compute the average approximation error
tmp_L2_error = model.get_L2_errors(tmp_M_S, tmp_analytic_M_S)
print 'L^2 error for tol=%g and h=%g using forward shooting is %g' %(tol, 1.0, tmp_L2_error)
# estimate the run-time
%timeit -n 1 -r 3 model.solve_forward_shooting(k_min, h, tol, integrator='dopri5', **kwargs)
print ''
L^2 error for tol=1e-05 and h=1 using forward shooting is 0.00850839 1 loops, best of 3: 24.1 s per loop L^2 error for tol=1e-06 and h=1 using forward shooting is 0.00013436 1 loops, best of 3: 1min 45s per loop L^2 error for tol=1e-07 and h=1 using forward shooting is 0.00016378 1 loops, best of 3: 1min 48s per loop L^2 error for tol=1e-08 and h=1 using forward shooting is 6.03991e-06 1 loops, best of 3: 5min 3s per loop L^2 error for tol=1e-09 and h=1 using forward shooting is 5.32365e-07 1 loops, best of 3: 6min 35s per loop
model.solve_multiple_shooting??
def bc(veca, vecb):
    """
    Boundary conditions for the Ramsey model.

    Args:
        veca: (array) Values of the dependent variables at the left
              boundary.
        vecb: (array) Values of the dependent variables at the right
              boundary.

    Returns:
        bc: (tuple) Two arrays containing the difference between the
            current and required values of the boundary conditions.
    """
    # capital starts from k_min; consumption ends at its steady-state value
    residual_left = np.array([veca[0] - k_min])
    residual_right = np.array([vecb[1] - c_star])
    return (residual_left, residual_right)
# Accuracy/run-time sweep for multiple shooting over tolerance and horizon T.
# NOTE(review): indentation was lost in this transcript; in the original
# notebook cell the `T` loop and its body are nested inside the `tol` loop.
guess = (k_star, c_star)
for tol in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9]:
for T in [5e2, 1e3, 5e3]:
solver_opts = {'tolerance':tol, 'max_subintervals':500}
tmp_sol = model.solve_multiple_shooting(k_min, T, guess, bc, **solver_opts)
# compute the analytic solution
grid = np.linspace(0, T, 1000)
analytic_M_S = ramsey_analytic_solution(k_min, grid, model.args)
# compute the average approximation error
tmp_M_S = np.hstack((grid[:,np.newaxis], tmp_sol(grid).T))
L2_error = model.get_L2_errors(analytic_M_S, tmp_M_S)
print 'L^2 approximation error for tol=%g and T=%g is %g' %(tol, T, L2_error)
# estimate the run-time
%timeit -n 1 -r 3 model.solve_multiple_shooting(k_min, T, guess, bc, **solver_opts)
print ''
L^2 approximation error for tol=0.1 and T=500 is 1.07706 1 loops, best of 3: 23 ms per loop L^2 approximation error for tol=0.1 and T=1000 is 10.6753 1 loops, best of 3: 27.4 ms per loop L^2 approximation error for tol=0.1 and T=5000 is 0.780274 1 loops, best of 3: 253 ms per loop L^2 approximation error for tol=0.01 and T=500 is 2.0182 1 loops, best of 3: 24.6 ms per loop L^2 approximation error for tol=0.01 and T=1000 is 17.3834 1 loops, best of 3: 31.9 ms per loop L^2 approximation error for tol=0.01 and T=5000 is 0.000135369 1 loops, best of 3: 261 ms per loop L^2 approximation error for tol=0.001 and T=500 is 0.369221 1 loops, best of 3: 33.4 ms per loop L^2 approximation error for tol=0.001 and T=1000 is 2.43851 1 loops, best of 3: 50.7 ms per loop L^2 approximation error for tol=0.001 and T=5000 is 0.00013557 1 loops, best of 3: 302 ms per loop L^2 approximation error for tol=0.0001 and T=500 is 0.0269296 1 loops, best of 3: 45.1 ms per loop L^2 approximation error for tol=0.0001 and T=1000 is 0.0542089 1 loops, best of 3: 76.9 ms per loop L^2 approximation error for tol=0.0001 and T=5000 is 0.000133266 1 loops, best of 3: 352 ms per loop L^2 approximation error for tol=1e-05 and T=500 is 0.00287892 1 loops, best of 3: 65.2 ms per loop L^2 approximation error for tol=1e-05 and T=1000 is 0.00506856 1 loops, best of 3: 112 ms per loop L^2 approximation error for tol=1e-05 and T=5000 is 0.000133079 1 loops, best of 3: 383 ms per loop L^2 approximation error for tol=1e-06 and T=500 is 0.000282065 1 loops, best of 3: 92.2 ms per loop L^2 approximation error for tol=1e-06 and T=1000 is 0.000453135 1 loops, best of 3: 182 ms per loop L^2 approximation error for tol=1e-06 and T=5000 is 0.000189241 1 loops, best of 3: 533 ms per loop L^2 approximation error for tol=1e-07 and T=500 is 2.23962e-05 1 loops, best of 3: 145 ms per loop L^2 approximation error for tol=1e-07 and T=1000 is 3.89705e-05 1 loops, best of 3: 255 ms per loop L^2 approximation error for tol=1e-07 
and T=5000 is 1.17329e-05 1 loops, best of 3: 653 ms per loop L^2 approximation error for tol=1e-08 and T=500 is 2.11746e-06 1 loops, best of 3: 276 ms per loop L^2 approximation error for tol=1e-08 and T=1000 is 3.21072e-06 1 loops, best of 3: 390 ms per loop L^2 approximation error for tol=1e-08 and T=5000 is 5.66946e-06 1 loops, best of 3: 775 ms per loop L^2 approximation error for tol=1e-09 and T=500 is 1.94358e-07 1 loops, best of 3: 424 ms per loop L^2 approximation error for tol=1e-09 and T=1000 is 3.1081e-07 1 loops, best of 3: 611 ms per loop L^2 approximation error for tol=1e-09 and T=5000 is 4.76174e-07 1 loops, best of 3: 1.09 s per loop
analytic_M_S
array([[ 0. , 4.63299433, 1.1057148 ], [ 1.001001 , 4.83492258, 1.12139156], [ 2.002002 , 5.02956371, 1.13609264], ..., [ 997.997998 , 9.26598865, 1.3898983 ], [ 998.998999 , 9.26598865, 1.3898983 ], [ 1000. , 9.26598865, 1.3898983 ]])
tmp_M_S(grid).T
array([[ 4.63299433, 1.1057148 ], [ 4.83492258, 1.12139156], [ 5.02956371, 1.13609264], ..., [ 9.26598865, 1.3898983 ], [ 9.26598865, 1.3898983 ], [ 9.26598865, 1.3898983 ]])
# Plot consumption against capital along the computed solution.
# BUG FIX: at this point tmp_M_S has only two columns, (k, c) -- see the
# IndexError traceback below ("index 2 is out of bounds for axis 1 with
# size 2"); plot column 0 against column 1 instead of 1 against 2.
plt.plot(tmp_M_S[:,0], tmp_M_S[:,1])
plt.show()
--------------------------------------------------------------------------- IndexError Traceback (most recent call last) <ipython-input-112-81c2cd6820a3> in <module>() ----> 1 plt.plot(tmp_M_S[:,1], tmp_M_S[:,2]) 2 plt.show() IndexError: index 2 is out of bounds for axis 1 with size 2
# Accuracy/run-time sweep for reverse shooting over eps and step size h.
# NOTE(review): indentation was lost in this transcript; in the original
# notebook cell the `h` loop and its body are nested inside the `eps` loop.
grid = np.linspace(k_min, k_max, 1000)
analytic_pol = ramsey_analytic_consumption_policy(grid, model.args)
for eps in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9]:
for h in [1e-1, 1e-2, 1e-3]:
kwargs = {'h':h, 'eps':eps, 'integrator':'dopri5', 'k':3, 'options':{'rtol':1e-9}}
tmp_policy = model.get_consumption_policy(k_min, k_max, method='reverse', **kwargs)
# compute the average approximation error
# NOTE(review): bare get_L2_errors here vs model.get_L2_errors in earlier
# cells -- presumably a module-level function with the same role; verify.
L2_error = get_L2_errors(analytic_pol, tmp_policy(grid))
print 'L^2 approximation error for eps=%g and h=%g is %g' %(eps, h, L2_error)
# estimate the run-time
%timeit -n 1 -r 3 model.get_consumption_policy(k_min, k_max, method='reverse', **kwargs)
L^2 approximation error for eps=0.1 and h=0.1 is 5.81272e-05 1 loops, best of 3: 39.3 ms per loop L^2 approximation error for eps=0.1 and h=0.01 is 7.88959e-05 1 loops, best of 3: 362 ms per loop L^2 approximation error for eps=0.1 and h=0.001 is 8.30534e-05 1 loops, best of 3: 3.63 s per loop L^2 approximation error for eps=0.01 and h=0.1 is 8.38639e-08 1 loops, best of 3: 38.4 ms per loop L^2 approximation error for eps=0.01 and h=0.01 is 1.81989e-07 1 loops, best of 3: 361 ms per loop L^2 approximation error for eps=0.01 and h=0.001 is 2.36569e-07 1 loops, best of 3: 3.72 s per loop L^2 approximation error for eps=0.001 and h=0.1 is 6.98518e-09 1 loops, best of 3: 38.6 ms per loop L^2 approximation error for eps=0.001 and h=0.01 is 2.91517e-10 1 loops, best of 3: 363 ms per loop L^2 approximation error for eps=0.001 and h=0.001 is 2.44835e-10 1 loops, best of 3: 3.91 s per loop L^2 approximation error for eps=0.0001 and h=0.1 is 7.09362e-09 1 loops, best of 3: 39 ms per loop L^2 approximation error for eps=0.0001 and h=0.01 is 1.99087e-10 1 loops, best of 3: 364 ms per loop L^2 approximation error for eps=0.0001 and h=0.001 is 2.62901e-13 1 loops, best of 3: 3.84 s per loop L^2 approximation error for eps=1e-05 and h=0.1 is 7.10672e-09 1 loops, best of 3: 42.9 ms per loop L^2 approximation error for eps=1e-05 and h=0.01 is 2.46788e-10 1 loops, best of 3: 366 ms per loop L^2 approximation error for eps=1e-05 and h=0.001 is 8.82842e-14 1 loops, best of 3: 3.99 s per loop L^2 approximation error for eps=1e-06 and h=0.1 is 7.10807e-09 1 loops, best of 3: 53.3 ms per loop L^2 approximation error for eps=1e-06 and h=0.01 is 2.52222e-10 1 loops, best of 3: 388 ms per loop L^2 approximation error for eps=1e-06 and h=0.001 is 8.07547e-14 1 loops, best of 3: 4.2 s per loop L^2 approximation error for eps=1e-07 and h=0.1 is 7.1082e-09 1 loops, best of 3: 42.5 ms per loop L^2 approximation error for eps=1e-07 and h=0.01 is 2.52614e-10 1 loops, best of 3: 402 ms per loop L^2 
approximation error for eps=1e-07 and h=0.001 is 9.54105e-14 1 loops, best of 3: 4.1 s per loop L^2 approximation error for eps=1e-08 and h=0.1 is 7.10821e-09 1 loops, best of 3: 42.3 ms per loop L^2 approximation error for eps=1e-08 and h=0.01 is 2.53054e-10 1 loops, best of 3: 399 ms per loop L^2 approximation error for eps=1e-08 and h=0.001 is 1.07808e-13 1 loops, best of 3: 4.21 s per loop L^2 approximation error for eps=1e-09 and h=0.1 is 7.10697e-09 1 loops, best of 3: 42.7 ms per loop L^2 approximation error for eps=1e-09 and h=0.01 is 2.50795e-10 1 loops, best of 3: 411 ms per loop L^2 approximation error for eps=1e-09 and h=0.001 is 1.72701e-13 1 loops, best of 3: 4.01 s per loop
# Reverse shooting with the step size tied to eps (h = eps).
# NOTE(review): indentation was lost in this transcript; in the original
# notebook cell the loop body is indented under the `eps` loop.
grid = np.linspace(k_min, k_max, 1000)
analytic_pol = ramsey_analytic_consumption_policy(grid, model.args)
for eps in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]:
kwargs = {'h':eps, 'eps':eps, 'integrator':'dopri5', 'k':3, 'options':{'rtol':1e-9}}
tmp_policy = model.get_consumption_policy(k_min, k_max, method='reverse', **kwargs)
# compute the average approximation error
L2_error = get_L2_errors(analytic_pol, tmp_policy(grid))
print 'L^2 approximation error for eps=%g and h=%g is %g' %(eps, eps, L2_error)
# estimate the run-time
# (the run below was interrupted by hand at eps=1e-5 -- see traceback)
%timeit -n 1 -r 3 model.get_consumption_policy(k_min, k_max, method='reverse', **kwargs)
L^2 approximation error for eps=0.1 and h=0.1 is 5.81272e-05 1 loops, best of 3: 38.6 ms per loop L^2 approximation error for eps=0.01 and h=0.01 is 1.81989e-07 1 loops, best of 3: 362 ms per loop L^2 approximation error for eps=0.001 and h=0.001 is 2.44835e-10 1 loops, best of 3: 3.68 s per loop L^2 approximation error for eps=0.0001 and h=0.0001 is 2.84974e-13 1 loops, best of 3: 1min 19s per loop
--------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) <ipython-input-128-fe1076d1d2fb> in <module>() 5 6 kwargs = {'h':eps, 'eps':eps, 'integrator':'dopri5', 'k':3, 'options':{'rtol':1e-9}} ----> 7 tmp_policy = model.get_consumption_policy(k_min, k_max, method='reverse', **kwargs) 8 9 # compute the average approximation error /Users/clarissasweet/Projects/pyeconomics/models/growth.pyc in get_consumption_policy(self, kmin, kmax, method, **kwargs) 1436 1437 # compute the stable manifold -> 1438 M_S = self.get_stable_manifold(kmin, kmax, method, **kwargs) 1439 1440 # construct a callable B-spline repr of the policy function /Users/clarissasweet/Projects/pyeconomics/models/growth.pyc in get_stable_manifold(self, kmin, kmax, method, **kwargs) 1280 1281 lower_M_S = self.solve_reverse_shooting(kmin, -h, eps, integrator, -> 1282 **options) 1283 upper_M_S = self.solve_reverse_shooting(kmax, h, eps, integrator, 1284 **options) /Users/clarissasweet/Projects/pyeconomics/models/growth.pyc in solve_reverse_shooting(self, k0, h, eps, integrator, step, relax, **kwargs) 1128 ode.integrate(ode.t + h, step, relax) 1129 current_step = np.hstack((ode.t, ode.y)) -> 1130 solution = np.vstack((solution, current_step)) 1131 1132 # check to see if initial condition has been reached /Users/clarissasweet/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/numpy/core/shape_base.pyc in vstack(tup) 224 225 """ --> 226 return _nx.concatenate(map(atleast_2d,tup),0) 227 228 def hstack(tup): KeyboardInterrupt:
tmp_policy[::-1]
array([[ 9.16598865, 1.3849483 ], [ 9.16698865, 1.38499834], [ 9.16798865, 1.38504838], [ 9.16898865, 1.38509842], [ 9.16998865, 1.38514846], [ 9.17098865, 1.38519851], [ 9.17198865, 1.38524855], [ 9.17298865, 1.38529859], [ 9.17398865, 1.38534864], [ 9.17498865, 1.38539868], [ 9.17598865, 1.38544873], [ 9.17698865, 1.38549878], [ 9.17798865, 1.38554883], [ 9.17898865, 1.38559888], [ 9.17998865, 1.38564893], [ 9.18098865, 1.38569899], [ 9.18198865, 1.38574904], [ 9.18298865, 1.3857991 ], [ 9.18398865, 1.38584917], [ 9.18498865, 1.38589923], [ 9.18598865, 1.3859493 ], [ 9.18698865, 1.38599937], [ 9.18798865, 1.38604945], [ 9.18898865, 1.38609953], [ 9.18998865, 1.38614961], [ 9.19098865, 1.3861997 ], [ 9.19198865, 1.38624979], [ 9.19298865, 1.38629989], [ 9.19398865, 1.38634999], [ 9.19498865, 1.3864001 ], [ 9.19598865, 1.38645022], [ 9.19698865, 1.38650034], [ 9.19798865, 1.38655047], [ 9.19898865, 1.38660061], [ 9.19998865, 1.38665076], [ 9.20098865, 1.38670092], [ 9.20198865, 1.38675108], [ 9.20298865, 1.38680126], [ 9.20398865, 1.38685145], [ 9.20498865, 1.38690165], [ 9.20598865, 1.38695186], [ 9.20698865, 1.38700208], [ 9.20798865, 1.38705232], [ 9.20898865, 1.38710257], [ 9.20998865, 1.38715284], [ 9.21098865, 1.38720313], [ 9.21198865, 1.38725344], [ 9.21298865, 1.38730376], [ 9.21398865, 1.38735411], [ 9.21498865, 1.38740447], [ 9.21598865, 1.38745487], [ 9.21698865, 1.38750528], [ 9.21798865, 1.38755573], [ 9.21898865, 1.38760621], [ 9.21998865, 1.38765671], [ 9.22098865, 1.38770726], [ 9.22198865, 1.38775784], [ 9.22298865, 1.38780846], [ 9.22398865, 1.38785912], [ 9.22498865, 1.38790983], [ 9.22598865, 1.38796059], [ 9.22698865, 1.3880114 ], [ 9.22798865, 1.38806228], [ 9.22898865, 1.38811322], [ 9.22998865, 1.38816423], [ 9.23098865, 1.38821532], [ 9.23198865, 1.3882665 ], [ 9.23298865, 1.38831777], [ 9.23398865, 1.38836915], [ 9.23498865, 1.38842064], [ 9.23598865, 1.38847226], [ 9.23698865, 1.38852402], [ 9.23798865, 1.38857594], [ 9.23898865, 
1.38862804], [ 9.23998865, 1.38868035], [ 9.24098865, 1.38873288], [ 9.24198865, 1.38878568], [ 9.24298865, 1.38883879], [ 9.24398865, 1.38889224], [ 9.24498865, 1.38894611], [ 9.24598865, 1.38900047], [ 9.24698865, 1.3890554 ], [ 9.24798865, 1.38911103], [ 9.24898865, 1.38916752], [ 9.24998865, 1.38922509], [ 9.25098865, 1.38928403], [ 9.25198865, 1.38934479], [ 9.25298865, 1.38940801], [ 9.25398865, 1.38947481], [ 9.25498865, 1.38954718], [ 9.25598865, 1.38962949], [ 9.25698865, 1.38973575], [ 9.25764875, 1.3899045 ]])
# Build consumption policies by forward and reverse shooting for several steps.
# NOTE(review): indentation was lost in this transcript; the loop body is
# nested under `for step` in the original notebook cell.  The leading
# positional argument `1` to get_consumption_policy differs from the
# method='...' keyword used elsewhere -- presumably a method selector or
# spline degree; confirm against the growth.RamseyModel API.
reverse_shooting_policies = {}
forward_shooting_policies = {}
for step in [1.0, 0.1, 0.01, 0.001]:
kwargs = {'rtol':1e-9}
# compute the policy function using forward shooting
tmp_pol = model.get_consumption_policy(1, k_min, k_max, h=step, tol=1e-4, integrator='dopri5', **kwargs)
forward_shooting_policies[step] = tmp_pol
# compute the policy function using reverse shooting
tmp_pol = model.get_consumption_policy(1, k_min, k_max, h=step, eps=step, integrator='dopri5', **kwargs)
reverse_shooting_policies[step] = tmp_pol
print 'Done with h=%g' %step
# Print the L^2 distance between each computed policy and the analytic one.
# NOTE(review): indentation was lost in this transcript; the body is nested
# under `for step` in the original notebook cell.
for step in [1.0, 0.1, 0.01, 0.001]:
tmp_pol = forward_shooting_policies[step]
print np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - tmp_pol(plot_grid))**2)**0.5
# compute the policy function using reverse shooting
tmp_pol = reverse_shooting_policies[step]
print np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - tmp_pol(plot_grid))**2)**0.5
print 'Done with h=%g' %step
# Benchmark consumption policy from multiple shooting over a T=100 horizon,
# represented as a degree-1 (linear) spline.
kwargs = {'T':1e2, 'solution_guess':guess, 'boundary_conditions':bc, 'k':1}
benchmark_policy = model.get_consumption_policy(k_min, k_max, method='multiple', **kwargs)
# NOTE(review): this cell is a verbatim re-run of the policy-construction
# cell above (the dictionaries are rebuilt from scratch).
# Indentation was lost in this transcript; the body is nested under
# `for step` in the original notebook cell.
reverse_shooting_policies = {}
forward_shooting_policies = {}
for step in [1.0, 0.1, 0.01, 0.001]:
kwargs = {'rtol':1e-9}
# compute the policy function using forward shooting
tmp_pol = model.get_consumption_policy(1, k_min, k_max, h=step, tol=1e-4, integrator='dopri5', **kwargs)
forward_shooting_policies[step] = tmp_pol
# compute the policy function using reverse shooting
tmp_pol = model.get_consumption_policy(1, k_min, k_max, h=step, eps=step, integrator='dopri5', **kwargs)
reverse_shooting_policies[step] = tmp_pol
print 'Done with h=%g' %step
# NOTE(review): verbatim re-run of the error-printout cell above.
# Indentation was lost in this transcript; the body is nested under
# `for step` in the original notebook cell.
for step in [1.0, 0.1, 0.01, 0.001]:
tmp_pol = forward_shooting_policies[step]
print np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - tmp_pol(plot_grid))**2)**0.5
# compute the policy function using reverse shooting
tmp_pol = reverse_shooting_policies[step]
print np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - tmp_pol(plot_grid))**2)**0.5
print 'Done with h=%g' %step
# Analytic (red) vs benchmark (blue) consumption policy over the capital grid.
plot_grid = np.linspace(k_min, k_max, 1000)
analytic_values = ramsey_analytic_consumption_policy(plot_grid, model.args)
plt.figure()
plt.plot(plot_grid, analytic_values, 'r-')
plt.plot(plot_grid, benchmark_policy(plot_grid), 'b-')
plt.show()
# NOTE(review): the two benchmark computations below are an exact duplicate
# of each other (same kwargs, same call).  The kwargs keys also differ from
# the earlier benchmark cell ('guess' here vs 'solution_guess' above, and
# k=1.0 as a float) -- presumably accepted by get_consumption_policy; verify
# against the growth.RamseyModel API before re-running.
# compute benchmark policy function using reverse shooting with very small h
#kwargs = {'h':1e0, 'eps':1e-6, 'integrator':'dopri5', 'k':1.0}
kwargs = {'T':1000, 'guess':None, 'k':1.0}
benchmark_policy = model.get_consumption_policy(k_min, k_max, method='multiple', **kwargs)
# compute benchmark policy function using reverse shooting with very small h
#kwargs = {'h':1e0, 'eps':1e-6, 'integrator':'dopri5', 'k':1.0}
kwargs = {'T':1000, 'guess':None, 'k':1.0}
benchmark_policy = model.get_consumption_policy(k_min, k_max, method='multiple', **kwargs)
# plot the analytic policy (red) against the benchmark policy (blue)
plot_grid = np.linspace(k_min, k_max, 1000)
plt.figure()
plt.plot(plot_grid, ramsey_analytic_consumption_policy(plot_grid, model.args), 'r-')
plt.plot(plot_grid, benchmark_policy(plot_grid), 'b-')
plt.show()
sol = model.solve_multiple_shooting(k_min, 10)
model.steady_state.values['k_star']
9.265988652495905
sol.solution
array([[ 4.63299433, 5.00190629, 5.37037684, 5.73568031, 6.09519978, 6.44683676, 6.78654857, 7.11002733, 7.41462331, 7.69757511, 7.95654033, 8.19014603, 8.39737439, 8.57802651, 8.7326777 , 8.86215379, 8.96814154, 9.05283843, 9.11869682, 9.16831724, 9.20437128, 9.22945564, 9.24599967, 9.25617471, 9.26185134, 9.26460633, 9.26567874, 9.26595595, 9.26598189, 9.26598542, 9.26598604], [ 1.10571453, 1.13402695, 1.16094119, 1.18642882, 1.21047183, 1.23308517, 1.25415968, 1.27357992, 1.29133253, 1.30739102, 1.32174511, 1.33442741, 1.34547637, 1.35496026, 1.36297339, 1.36960929, 1.37499317, 1.37926498, 1.38256816, 1.38504638, 1.38684141, 1.38808752, 1.38890813, 1.38941233, 1.38969347, 1.38982987, 1.38988296, 1.38989668, 1.38989799, 1.3898982 , 1.3898983 ]])
np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - benchmark_policy(plot_grid))**2)**0.5
0.00011831131571665765
np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - benchmark_policy(plot_grid))**2)**0.5
4.3714282551262889
np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - benchmark_policy(plot_grid))**2)**0.5
1.1942285268741086e-06
np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - benchmark_policy(plot_grid))**2)**0.5
0.5644156256657098
np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - benchmark_policy(plot_grid))**2)**0.5
1.2112922866447431e-08
np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - benchmark_policy(plot_grid))**2)**0.5
0.14327714300320468
np.sum((ramsey_analytic_consumption_policy(plot_grid, model.args) - benchmark_policy(plot_grid))**2)**0.5
1.217247729087086e-10
Done with h=1 Done with h=0.1 Done with h=0.01 Done with h=0.001
0.0970540474686 0.0211672371143 Done with h=1 0.142869991476 0.00011831133772 Done with h=0.1 0.143195763183 1.19422852687e-06 Done with h=0.01 0.143214380603 1.21129228664e-08 Done with h=0.001
grid = np.linspace(k_min, k_max, 100)
forward_shooting_policies[0.0001](grid) - benchmark_policy(grid)
array([ 1.01765725e-06, 1.08538537e-06, 1.15764668e-06, 1.23491163e-06, 1.31766221e-06, 1.40650272e-06, 1.50210049e-06, 1.60522988e-06, 1.71678920e-06, 1.83782680e-06, 1.96957204e-06, 2.11347883e-06, 2.27127641e-06, 2.44504033e-06, 2.63728220e-06, 2.85106980e-06, 3.09020032e-06, 3.35944343e-06, 3.66474783e-06, 4.01382311e-06, 4.41674427e-06, 4.88696472e-06, 5.44278294e-06, 6.10979398e-06, 6.92491146e-06, 7.94355839e-06, 9.25283942e-06, 1.09977333e-05, 1.34389418e-05, 1.70975458e-05, 2.31883558e-05, 3.53536373e-05, 7.18383237e-05, 1.40214780e-06, -4.23401908e-05, -2.16034104e-05, -1.46812542e-05, -1.12152994e-05, -9.13356265e-06, -7.74471761e-06, -6.75215959e-06, -6.00747976e-06, -5.42816756e-06, -4.96468460e-06, -4.58548737e-06, -4.26953005e-06, -4.00224050e-06, -3.77320457e-06, -3.57478601e-06, -3.40124864e-06, -3.24820662e-06, -3.11225223e-06, -2.99068842e-06, -2.88135700e-06, -2.78251251e-06, -2.69272672e-06, -2.61082036e-06, -2.53580989e-06, -2.46686853e-06, -2.40329598e-06, -2.34449620e-06, -2.28995825e-06, -2.23924125e-06, -2.19196321e-06, -2.14779159e-06, -2.10643526e-06, -2.06763819e-06, -2.03117447e-06, -1.99684415e-06, -1.96446944e-06, -1.93389169e-06, -1.90496878e-06, -1.87757338e-06, -1.85159089e-06, -1.82691785e-06, -1.80346060e-06, -1.78113409e-06, -1.75986106e-06, -1.73957126e-06, -1.72020050e-06, -1.70169001e-06, -1.68398591e-06, -1.66703869e-06, -1.65080279e-06, -1.63523624e-06, -1.62030028e-06, -1.60595910e-06, -1.59217953e-06, -1.57893081e-06, -1.56618441e-06, -1.55391378e-06, -1.54209421e-06, -1.53070265e-06, -1.51971759e-06, -1.50911895e-06, -1.49888791e-06, -1.48900688e-06, -1.47945935e-06, -1.47022983e-06, -1.46130377e-06])
grid = np.linspace(k_min, k_max, 100)
reverse_shooting_policies[0.0001](grid) - benchmark_policy(grid)
array([ 1.66246461e-10, 1.77195481e-10, 1.88900451e-10, 2.01436312e-10, 2.14888218e-10, 2.29353203e-10, 2.44943732e-10, 2.61788036e-10, 2.80036327e-10, 2.99862579e-10, 3.21471849e-10, 3.45106166e-10, 3.71053965e-10, 3.99659972e-10, 4.31343072e-10, 4.66141237e-10, 5.05074982e-10, 5.49016055e-10, 5.98961880e-10, 6.56197541e-10, 7.21961491e-10, 7.98808020e-10, 8.89697538e-10, 9.98747529e-10, 1.13196952e-09, 1.29847710e-09, 1.51249169e-09, 1.79768378e-09, 2.19664842e-09, 2.79450174e-09, 3.78955300e-09, 5.77570436e-09, 1.17142875e-08, 0.00000000e+00, -1.21331520e-08, -6.19711749e-09, -4.21224278e-09, -3.21802562e-09, -2.62077382e-09, -2.22228813e-09, -1.93749106e-09, -1.72382419e-09, -1.55761581e-09, -1.42449741e-09, -1.31566269e-09, -1.22531052e-09, -1.14844712e-09, -1.08240994e-09, -1.02586628e-09, -9.75582282e-10, -9.31288602e-10, -8.91988261e-10, -8.56894555e-10, -8.25375768e-10, -7.96922084e-10, -7.71118058e-10, -7.47615969e-10, -7.26129157e-10, -7.06415815e-10, -6.88271440e-10, -6.71523281e-10, -6.56020571e-10, -6.41633635e-10, -6.28253671e-10, -6.15780316e-10, -6.04130301e-10, -5.93228133e-10, -5.83008308e-10, -5.73409542e-10, -5.64385427e-10, -5.55887558e-10, -5.47871526e-10, -5.40302247e-10, -5.33141753e-10, -5.26370725e-10, -5.19952303e-10, -5.13863618e-10, -5.08079134e-10, -5.02591524e-10, -4.97364594e-10, -4.92388796e-10, -4.87652807e-10, -4.83134643e-10, -4.78830753e-10, -4.74721595e-10, -4.70789185e-10, -4.67038630e-10, -4.63443506e-10, -4.60014915e-10, -4.56724658e-10, -4.53568516e-10, -4.50554039e-10, -4.47665016e-10, -4.44893011e-10, -4.42232695e-10, -4.39677628e-10, -4.37222480e-10, -4.34866587e-10, -4.32615055e-10, -4.30449010e-10])
# Absolute error of the forward-shooting policies vs the benchmark (log scale).
# NOTE(review): indentation was lost; the plt.plot line is nested under the
# loop in the original cell.  Key 0.0001 is not populated by the visible
# construction loops ([1.0, 0.1, 0.01, 0.001]) -- presumably a later session
# added it; verify before re-running.
grid = np.linspace(k_min, k_max, 100)
for step in [0.01, 0.001, 0.0001]:
plt.plot(grid, np.abs(forward_shooting_policies[step](grid) - benchmark_policy(grid)))
plt.ylim(1e-16, 1e0)
plt.yscale('log')
plt.show()
# Absolute error of the reverse-shooting policies vs the benchmark (log scale).
# NOTE(review): indentation was lost; the plt.plot line is nested under the
# loop in the original cell.  Key 0.0001 is not populated by the visible
# construction loops -- verify before re-running.
plt.figure(figsize=(8,6))
grid = np.linspace(k_min, k_max, 1000)
for step in [0.1, 0.01, 0.001, 0.0001]:
plt.plot(grid, np.abs(reverse_shooting_policies[step](grid) - benchmark_policy(grid)))
plt.ylim(1e-16, 1e0)
plt.yscale('log')
plt.show()
model.steady_state.values['k_star']
3.3013033273505767
c_policy = model.get_stable_manifold(k_lower, k_upper, h=1.0, tol=1e-3, integrator='dopri5')
model.get_stable_manifold?
# Reverse shooting from k0 = 0.5*k*, compared against the closed-form solution.
k0 = 0.5 * model.steady_state.values['k_star']
k_star = model.steady_state.values['k_star']
c_star = model.steady_state.values['c_star']

# refresh the eigen-decomposition of the Jacobian at the steady state
model.steady_state.set_eigen_star(model.jac(0, [k_star, c_star], model.args))

traj = model.solve_reverse_shooting(k0, h=1e-1, eps=1e-1, integrator='erk4')

# closed-form trajectory evaluated at the same time points
analytic_traj = ramsey_analytic_solution(k0, traj[:,0], params)
model.compare_trajectories(analytic_traj, traj[::-1])
array([[ 0.01543713, 0.00826471], [ 0.01486159, 0.00795658], [ 0.01429036, 0.00765076], [ 0.01372633, 0.00734878], [ 0.01317181, 0.00705191], [ 0.01262865, 0.00676111], [ 0.0120983 , 0.00647717], [ 0.01158187, 0.00620069], [ 0.01108019, 0.0059321 ], [ 0.01059388, 0.00567174], [ 0.01012332, 0.00541981], [ 0.00966876, 0.00517645], [ 0.0092303 , 0.00494171], [ 0.00880793, 0.00471558], [ 0.00840154, 0.004498 ], [ 0.00801094, 0.00428889], [ 0.00763589, 0.0040881 ], [ 0.0072761 , 0.00389547], [ 0.00693121, 0.00371082], [ 0.00660085, 0.00353396], [ 0.00628463, 0.00336466], [ 0.00598213, 0.00320271], [ 0.00569292, 0.00304787], [ 0.00541657, 0.00289991], [ 0.00515262, 0.0027586 ], [ 0.00490064, 0.0026237 ], [ 0.00466019, 0.00249497], [ 0.00443083, 0.00237217], [ 0.00421212, 0.00225508], [ 0.00400364, 0.00214346], [ 0.00380497, 0.0020371 ], [ 0.00361572, 0.00193578], [ 0.00343547, 0.00183928], [ 0.00326385, 0.0017474 ], [ 0.00310048, 0.00165993], [ 0.00294501, 0.00157669], [ 0.00279707, 0.00149749], [ 0.00265633, 0.00142214], [ 0.00252247, 0.00135048], [ 0.00239517, 0.00128232]])
model.get_stable_manifold(0.5 * k_star, 2.0 * k_star, h=1.0, eps=1e-5)
array([[ 0.35012074, 0.18744722], [ 0.73120832, 0.39147342], [ 0.99466356, 0.53252177], [ 1.16156401, 0.62187673], [ 1.26359531, 0.67650213], [ 1.32488681, 0.7093163 ], [ 1.36136513, 0.72884601], [ 1.38296208, 0.74040856], [ 1.39570981, 0.74723343], [ 1.40322439, 0.75125657], [ 1.40764771, 0.75362473], [ 1.41024976, 0.75501781], [ 1.4117801 , 0.75583712], [ 1.41267863, 0.75631817], [ 1.4132091 , 0.75660218], [ 1.41352059, 0.75676894], [ 1.41370319, 0.7568667 ], [ 1.41381041, 0.7569241 ], [ 1.41387341, 0.75695783], [ 1.41391015, 0.7569775 ], [ 1.41393152, 0.75698894], [ 1.41394402, 0.75699563], [ 1.41396159, 0.75700504], [ 1.41397917, 0.75701445], [ 1.41399167, 0.75702114], [ 1.41401304, 0.75703259], [ 1.41404978, 0.75705226], [ 1.41411278, 0.75708599], [ 1.41422001, 0.75714339], [ 1.41440264, 0.75724117], [ 1.41471422, 0.75740798], [ 1.41524494, 0.75769212], [ 1.41614422, 0.75817357], [ 1.41767668, 0.75899403], [ 1.42028488, 0.7603904 ], [ 1.42472597, 0.76276806], [ 1.43229179, 0.76681864], [ 1.44518779, 0.77372289], [ 1.46721347, 0.78551497], [ 1.50493221, 0.80570878], [ 1.56981393, 0.84044508], [ 1.68223452, 0.90063268], [ 1.87926228, 1.00611716], [ 2.2305181 , 1.19417208], [ 2.87182623, 1.53751485]])
c_k = model.get_consumption_policy(1, 0.5 * k_star, 2.0 * k_star, h=1.0, tol=1e-6, integrator='forward_euler')
c_k(np.linspace(0, 2 * k_star)) - ramsey_analytic_consumption_policy(np.linspace(0, 2 * k_star), model.args)
array([ 6.15101503e-14, 5.45327672e-14, 4.75522399e-14, 4.05786516e-14, 3.35981243e-14, 2.66175970e-14, 1.97619698e-14, 1.26010313e-14, 5.66213743e-15, -1.22124533e-15, -8.27116153e-15, -1.51545443e-14, -2.22044605e-14, -2.91988655e-14, -3.61377595e-14, -4.31321645e-14, -5.00710584e-14, -5.75095527e-14, -7.67164110e-14, -9.59232693e-14, -1.16573418e-13, -1.70308212e-13, -2.44693155e-13, -4.27657909e-13, -1.37923006e-12, -2.20638285e-10, -7.50792761e-11, -4.75257611e-11, -3.66253694e-11, -2.78860268e-11, -2.48689958e-11, -2.19612106e-11, -1.90536475e-11, -1.74555925e-11, -1.65130132e-11, -1.55708779e-11, -1.46282986e-11, -1.36859413e-11, -1.27433619e-11, -1.22981625e-11, -1.20079502e-11, -1.17172938e-11, -1.14268595e-11, -1.11362031e-11, -1.08457687e-11, -1.05553344e-11, -1.02649000e-11, -9.97424365e-12, -9.68380931e-12, -9.39315292e-12])
# Overlay the numerical policy and the analytic policy on the same axes;
# the two curves should be visually indistinguishable given the residuals above.
plt.plot(np.linspace(0, 2 * k_star), c_k(np.linspace(0, 2 * k_star)))
plt.plot(np.linspace(0, 2 * k_star), ramsey_analytic_consumption_policy(np.linspace(0, 2 * k_star), model.args))
[<matplotlib.lines.Line2D at 0x9ab7210>]
# Resample the trajectory (dropping its last column) onto 100 evenly
# spaced times in [0, 3.9] with a cubic (k=3) spline; ext=0 presumably
# follows scipy's splev extrapolation convention -- TODO confirm.
ti = np.linspace(0, 3.9, 100)
model.interpolate(traj[:,:-1], ti, k=3, ext=0)
array([[ 0. , 1.31396159], [ 0.03939394, 1.31187305], [ 0.07878788, 1.3097415 ], [ 0.11818182, 1.30756606], [ 0.15757576, 1.30534583], [ 0.1969697 , 1.30307992], [ 0.23636364, 1.30076745], [ 0.27575758, 1.29840747], [ 0.31515152, 1.29599904], [ 0.35454545, 1.2935412 ], [ 0.39393939, 1.29103297], [ 0.43333333, 1.28847336], [ 0.47272727, 1.28586135], [ 0.51212121, 1.28319591], [ 0.55151515, 1.28047598], [ 0.59090909, 1.2777005 ], [ 0.63030303, 1.27486838], [ 0.66969697, 1.2719785 ], [ 0.70909091, 1.26902975], [ 0.74848485, 1.26602096], [ 0.78787879, 1.26295098], [ 0.82727273, 1.25981862], [ 0.86666667, 1.25662266], [ 0.90606061, 1.25336188], [ 0.94545455, 1.25003504], [ 0.98484848, 1.24664085], [ 1.02424242, 1.24317802], [ 1.06363636, 1.23964525], [ 1.1030303 , 1.23604118], [ 1.14242424, 1.23236448], [ 1.18181818, 1.22861376], [ 1.22121212, 1.22478761], [ 1.26060606, 1.22088461], [ 1.3 , 1.21690332], [ 1.33939394, 1.21284227], [ 1.37878788, 1.20869995], [ 1.41818182, 1.20447487], [ 1.45757576, 1.20016548], [ 1.4969697 , 1.19577021], [ 1.53636364, 1.1912875 ], [ 1.57575758, 1.18671572], [ 1.61515152, 1.18205324], [ 1.65454545, 1.17729843], [ 1.69393939, 1.17244959], [ 1.73333333, 1.16750502], [ 1.77272727, 1.16246302], [ 1.81212121, 1.15732182], [ 1.85151515, 1.15207966], [ 1.89090909, 1.14673476], [ 1.93030303, 1.14128529], [ 1.96969697, 1.13572943], [ 2.00909091, 1.13006531], [ 2.04848485, 1.12429107], [ 2.08787879, 1.11840479], [ 2.12727273, 1.11240456], [ 2.16666667, 1.10628845], [ 2.20606061, 1.10005448], [ 2.24545455, 1.0937007 ], [ 2.28484848, 1.08722509], [ 2.32424242, 1.08062564], [ 2.36363636, 1.07390033], [ 2.4030303 , 1.06704711], [ 2.44242424, 1.06006391], [ 2.48181818, 1.05294867], [ 2.52121212, 1.0456993 ], [ 2.56060606, 1.03831369], [ 2.6 , 1.03078974], [ 2.63939394, 1.02312534], [ 2.67878788, 1.01531836], [ 2.71818182, 1.00736666], [ 2.75757576, 0.99926813], [ 2.7969697 , 0.99102063], [ 2.83636364, 0.98262202], [ 2.87575758, 0.97407017], [ 2.91515152, 
0.96536297], [ 2.95454545, 0.9564983 ], [ 2.99393939, 0.94747405], [ 3.03333333, 0.93828814], [ 3.07272727, 0.9289385 ], [ 3.11212121, 0.91942307], [ 3.15151515, 0.90973984], [ 3.19090909, 0.89988681], [ 3.23030303, 0.88986201], [ 3.26969697, 0.87966354], [ 3.30909091, 0.8692895 ], [ 3.34848485, 0.85873807], [ 3.38787879, 0.84800748], [ 3.42727273, 0.837096 ], [ 3.46666667, 0.82600201], [ 3.50606061, 0.81472393], [ 3.54545455, 0.80326025], [ 3.58484848, 0.79160962], [ 3.62424242, 0.77977071], [ 3.66363636, 0.76774235], [ 3.7030303 , 0.75552345], [ 3.74242424, 0.74311304], [ 3.78181818, 0.73051046], [ 3.82121212, 0.7177151 ], [ 3.86060606, 0.70472637], [ 3.9 , 0.69154367]])
ramsey_analytic_consumption_policy(traj[:,1], model.args) - traj[:,2]
array([ 0.00000000e+00, -5.55111512e-17, 0.00000000e+00, 0.00000000e+00, -5.55111512e-17, 0.00000000e+00, -5.55111512e-17, -5.55111512e-17, -5.55111512e-17, -1.11022302e-16, -1.11022302e-16, -1.11022302e-16, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.11022302e-16, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, -1.11022302e-16, -1.11022302e-16, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.11022302e-16, 1.11022302e-16, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.11022302e-16, 0.00000000e+00, 1.11022302e-16, 1.11022302e-16])
traj[:,2]
array([ 0.67650213, 0.7093163 , 0.72884601, 0.74040856, 0.74723343, 0.75125657, 0.75362473, 0.75501781, 0.75583712, 0.75631817, 0.75660218, 0.75676894, 0.7568667 , 0.7569241 , 0.75695783, 0.7569775 , 0.75698894, 0.75699563, 0.75699969])
array([[ 8.97012186e-03, 4.80241297e-03], [ 5.35450441e-03, 2.86668808e-03], [ 3.17443249e-03, 1.69952382e-03], [ 1.87487322e-03, 1.00376735e-03], [ 1.10568807e-03, 5.91961938e-04], [ 6.48957057e-04, 3.47437843e-04], [ 3.80295112e-04, 2.03601936e-04], [ 2.22582919e-04, 1.19166173e-04], [ 1.29884727e-04, 6.95375280e-05], [ 7.67350365e-05, 4.10823108e-05], [ 4.33335592e-05, 2.31998684e-05], [ 2.40890703e-05, 1.28967773e-05], [ 1.32983300e-05, 7.11964377e-06], [ 7.09179034e-06, 3.79679411e-06], [ 3.47254822e-06, 1.85912864e-06], [ 1.64429592e-06, 8.80321125e-07], [ 7.96329025e-07, 4.26337632e-07], [ 3.65099121e-07, 1.95466315e-07], [ 1.18152866e-07, 6.32565381e-08]])
import math
In this task you will learn how to approximate the optimal consumption/savings decision rule for the representative household using a first-order Taylor series approximation in the neighborhood of the steady state of the Ramsey economy.
def eulerLinear(k):
    """Linearized consumption policy around the Ramsey steady state.

    This period's consumption per effective worker is written as a linear
    function of the current capital stock per effective worker, with slope
    given by the stable eigenvector of the Jacobian.

    Inputs:
        k: end of period t-1 capital stock per effective worker, k_{t-1}.
    Returns:
        c_t: consumption per effective worker.
    Raises:
        ValueError: if the Jacobian has no stable eigenvalue.  (The
            original code only printed a warning and then crashed with a
            NameError because ``i`` was never bound.)
    """
    # pick the index of the stable eigenvalue (modulus < 1)
    if np.abs(ramseyEigenvalues[0]) < 1:
        i = 0
    elif np.abs(ramseyEigenvalues[1]) < 1:
        i = 1
    else:
        raise ValueError('Jacobian has no stable eigenvalue!')
    # slope of the saddle path = ratio of the stable eigenvector's components
    slope = ramseyEigenvectors[1, i] / ramseyEigenvectors[0, i]
    return analytic_c_star() + slope * (k - analytic_k_star())
def capitalLinear(k):
    """Linear approximation of the equation of motion for capital
    per effective worker.

    Inputs:
        k: end of period t-1 capital per effective worker, k_{t-1}.
    Returns:
        Next period's capital stock per effective worker, k_t.
    """
    k_bar = analytic_k_star()
    c_bar = analytic_c_star()
    # first-order expansion of the law of motion around the steady state
    deviation_k = k - k_bar
    deviation_c = eulerLinear(k) - c_bar
    return k_bar + ramseyJacobian[0, 0] * deviation_k + ramseyJacobian[0, 1] * deviation_c
def capitalQuadratic(k):
    """Quadratic approximation of the equation of motion for capital
    per effective worker.

    Inputs:
        k: end of period t-1 capital per effective worker, k_{t-1}.
    Returns:
        Next period's capital stock per effective worker, k_t.
    Raises:
        NotImplementedError: always -- this is an exercise stub.  The
            original body was ``pass``, which silently returned None and
            would only surface much later as a confusing TypeError;
            failing loudly makes the missing implementation obvious.
    """
    raise NotImplementedError('capitalQuadratic is an exercise stub; implement the second-order expansion.')
Generate sample paths of the linear Ramsey model. Because the linearized model expresses next period's capital stock per effective worker (consumption per effective worker) in terms of the current period's capital stock per effective worker (consumption per effective worker) we can make use of the Python class DS from the Solow lab to simulate trajectories of the capital and consumption per effective worker for the linearized Ramsey economy!
# Create a grid of points for plotting
gridmax, gridsize = 200, 10000
grid = np.linspace(0, gridmax, gridsize)
# Given an initial level of capital
k0 = analytic_k_star() / 8
# Wrap the linearized law of motion in a discrete dynamical-system object
# (DS presumably comes from the earlier Solow lab -- TODO confirm)
ramsey_linear = DS(F=capitalLinear)
# Generate a sample path for the linearized model
#ramsey_linear.X = k0
#k_linear = ramsey_linear.sample_path(1000)
#c_linear = eulerLinear(k_linear)
# Create a new plot
plt.figure(figsize=(8,6))
model.plot_phaseSpace(gridmax=200)
# Add the initial level of capital per worker
plt.axvline(k0, color='k', ls='--', label=r'$k_{0}$')
# Plot the linearized saddle path c = eulerLinear(k) over the whole grid
plt.plot(grid, eulerLinear(grid), color='purple')
# Add a title to the plot
plt.title('Linear approximation of the saddle path')
# Add a legend
plt.legend(frameon=False)
plt.savefig('Graphics/Ramsey-Linear-Path.png')
plt.show()
# Re-import the ramsey module to pick up source edits (Python 2 builtin reload)
reload(ramsey)
model = ramsey.ramseyModel(ramsey_params)
# Create a grid of points for plotting
gridmax, gridsize = 200, 10000
grid = np.linspace(0, gridmax, gridsize)
# NOTE(review): the model is constructed a second time here with identical
# arguments -- the first construction above appears redundant.
model = ramsey.ramseyModel(ramsey_params)
# Given an initial level of capital, get the linearized consumption guess
k0 = 0.5 * analytic_k_star()
c0 = eulerLinear(k0)
# Create a new plot
plt.figure(figsize=(8,6))
model.plot_phaseSpace(gridmax=200)
# plot the sample path starting from the linearized guess
model.k, model.c = k0, c0
tmpPath = model.get_samplePath(T=1000)
plt.plot(tmpPath[:, 0], tmpPath[:, 1], color='r')
# Add a title to the plot
plt.title('Linearized guess is not a "good" one!', fontsize=15)
# Add a legend
plt.legend(frameon=False)
plt.savefig('Graphics/Ramsey-Sample-Path-Linearized-Guess.png')
plt.show()
reload(ramsey)
<module 'pyeconomics.models.ramsey' from '/Users/clarissasweet/Projects/pyeconomics/models/ramsey.py'>
# create an instance of the Python object representing the Ramsey model
model = ramsey.ramseyModel(ramsey_params)
# pick an initial condition for capital below the steady state...
k0 = analytic_k_star() / 8
# solve the model by forward shooting!
ramsey_solution_lower = model.solve_forwardShoot(k0, tol=1.5e-4)
# pick an initial condition for capital above the steady state...
k0 = analytic_k_star() * 8
# solve the model!
ramsey_solution_upper = model.solve_forwardShoot(k0, tol=1.5e-4)
# Create a new plot
plt.figure(figsize=(8,6))
# plot the Ramsey phase diagram
ax = model.plot_phaseSpace(gridmax=200)[0]
# Plot the full saddle-path (solution[1] holds the (k, c) trajectory)
ax.plot(ramsey_solution_lower[1][:, 0], ramsey_solution_lower[1][:, 1], color='r')
ax.plot(ramsey_solution_upper[1][:, 0], ramsey_solution_upper[1][:, 1], color='r')
# Plot the linearized sample path (grid comes from the previous cell)
ax.plot(grid, eulerLinear(grid), color='purple')
# Axes, labels, title, legend, etc
ax.set_xlim(0, 200)
ax.set_title('Saddlepath of the Ramsey economy', fontsize=20)
# Add a legend
plt.legend(frameon=False)
plt.savefig('Graphics/Ramsey-Full-Nonlinear-Saddle-Path.png')
plt.show()
plt.figure(figsize=(8,6))
ax = model.plot_phase_diagram(7.5)[0]
colors = mpl.cm.cool(np.linspace(0,1,10))
# compute the steady state values
k_star = model.steady_state.values['k_star']
c_star = model.steady_state.values['c_star']
# plot the unstable manifold: integrate forward from small perturbations
# of the steady state on either side
init = [k_star - 1e-5, c_star + 1e-5]
traj = model.integrate(t0=0, y0=init, T=1e3, h=0.25, integrator='lsoda')
ax.plot(traj[:,1], traj[:,2], label='$M_U$', color=colors[0])
init = [k_star + 1e-5, c_star - 1e-5]
traj = model.integrate(t0=0, y0=init, T=1e3, h=0.25, integrator='lsoda')
ax.plot(traj[:,1], traj[:,2], color=colors[0])
# plot the stable manifold via reverse shooting (needs the Jacobian's
# eigen-decomposition at the steady state)
model.steady_state.set_eigen_star(model.jac(0, [k_star, c_star], model.args))
optimal_traj = model.solve_reverse_shooting(0.1 * k_star, h=0.5, eps=1e-9)
ax.plot(optimal_traj[:,1], optimal_traj[:,2], label='$M_S$', color=colors[9])
# NOTE(review): optimal_traj is rebound here; the first branch is no longer
# accessible after this cell runs.
optimal_traj = model.solve_reverse_shooting(2 * k_star, h=1.0, eps=1e-9)
ax.plot(optimal_traj[:,1], optimal_traj[:,2], color=colors[9])
ax.set_title('Phase diagram for the Ramsey (1928) model', fontsize=20)
ax.legend(loc='best', frameon=False, prop={'family':'serif'})
plt.savefig('Graphics/ramsey-phase-diagram-with-manifolds.pdf')
plt.show()
Very much homework related! So pay attention! Two different ways to analyze welfare in the Ramsey model:
We can calculate both measures for the linearized and the full saddle path and compare them. First, we need to compare the linearized solution with the full non-linear solution...
# Compare time path of consumption between the linear and non-linear solution
# Initialize the linearized and full saddle paths
ramsey_linear.X = analytic_k_star() / 8
# Simulate the time path of consumption for the linearized model
# (ramsey_solution_lower[2] is presumably the number of periods the full
# solution ran for -- TODO confirm against solve_forwardShoot's return)
c_linear = eulerLinear(ramsey_linear.sample_path(ramsey_solution_lower[2]))
# Create a new plot
plt.figure(figsize=(8,8))
plt.plot(ramsey_solution_lower[1][:, 1], color='b', label=r'$c_{Full}$')
plt.plot(c_linear, color='g', label=r'$c_{Linear}$')
# Add the steady state value for consumption per effective worker
plt.axhline(analytic_c_star(), ls='--', color='k', label=r'$c^{*}$')
# Don't forget to label your axes!
plt.xlim(0, 200)
plt.xlabel('Time')
plt.ylabel('Consumption per effective worker, c')
# Add the legend
plt.legend(loc='best', frameon=False)
# Add a title to the plot
plt.title('Solution paths of consumption per effective worker, c', weight='bold')
plt.savefig('Graphics/Full-vs-Linear-Saddlepath-Comparison.png')
plt.show()
Then we can plot the flow of utility over time...
# initial value for technology
A0 = 1.0
# Create a grid (useful for plotting technology...recall that consumption per worker C = c * A!)
# NOTE(review): this cell fails -- per the traceback below, `g` is not
# defined at this point in the session, and `ramsey_solution` is only
# created in a later cell.  Run the shock cell first (or use
# ramsey_solution_lower and an explicit g).
grid = np.arange(0, ramsey_solution[2], 1)
# Create a new plot
plt.figure(figsize=(8,8))
plt.plot(grid, u(ramsey_solution[1][:, 1] * A0 * np.exp(g * grid)), color='b', label=r'$u_{Full}$')
plt.plot(grid, u(c_linear * A0 * np.exp(g * grid)), color='g', label=r'$u_{Linear}$')
# Add the steady state flow of utility
# NOTE(review): c_star was bound to a float earlier in the session;
# calling it as c_star() will raise TypeError -- verify which binding is
# intended here.
plt.plot(grid, u(c_star() * A0 * np.exp(g * grid)), ls='--', color='k', label=r'$u^{*}$')
# Don't forget to label your axes!
plt.xlim(0, ramsey_solution[2])
plt.xlabel('Time')
plt.ylabel('Instantaneous Welfare')
# Add the legend
plt.legend(loc='best', frameon=False)
# Add a title to the plot
plt.title('Flow of welfare (i.e., utility) for representative individual')
plt.savefig('Graphics/Instantaneous-utility-Ramsey-model.png')
plt.show()
--------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-135-51902ca01fa2> in <module>() 7 # Create a new plot 8 plt.figure(figsize=(8,8)) ----> 9 plt.plot(grid, u(ramsey_solution[1][:, 1] * A0 * np.exp(g * grid)), color='b', label=r'$u_{Full}$') 10 plt.plot(grid, u(c_linear * A0 * np.exp(g * grid)), color='g', label=r'$u_{Linear}$') 11 NameError: name 'g' is not defined
def lifetimeUtility(X, params):
    """Present discounted value of lifetime utility for a consumption stream.

    Note that the function automatically accounts for growth in technology
    and population.

    Inputs:
        X: an array representing a consumption stream (per effective worker).
        params: dict with the technology growth rate 'g', the population
            growth rate 'n', and the discount rate 'rho'.
    Returns: A list containing...
        1) Present discount value, in terms of utility, of the consumption stream
        2) The path of discounted flow utility (for plotting)
    """
    g = params['g']
    n = params['n']
    rho = params['rho']
    n_periods = np.size(X)
    path = np.zeros(n_periods)
    scale = L0 / H  # households per unit -- relies on globals L0, H; TODO confirm
    for t in range(n_periods):
        # discounted flow utility of consumption per worker, c_t * A_t
        path[t] = scale * np.exp((n - rho) * t) * u(X[t] * A0 * np.exp(g * t))
    return [np.sum(path), path]
# Lifetime utility is higher (barely!) following the full, non-linear consumption path
# NOTE(review): per the traceback below this cell raised NameError --
# `params` is only defined in a later cell; run that cell first.
print "Lifetime utility from the non-linear solution: ", lifetimeUtility(ramsey_solution[1][:, 1], params)[0]
print "Lifetime utility from the linearized solution: ", lifetimeUtility(c_linear, params)[0]
--------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-137-2aa5bdf8c13f> in <module>() 1 # Lifetime utility is higher (barely!) following the full, non-linear consumption path ----> 2 print "Lifetime utility from the non-linear solution: ", lifetimeUtility(ramsey_solution[1][:, 1], params)[0] 3 print "Lifetime utility from the linearized solution: ", lifetimeUtility(c_linear, params)[0] NameError: name 'params' is not defined
Lifetime utility from the non-linear solution:
In this task I will provide code to analyze a shock to the growth rate of technology. The code starts by assuming that you are initially in the steady state of the Ramsey economy (based on same parameters we have been working with above). I then double the quarterly growth rate of technology to $g=0.01$.
# reset old params (collect the session's scalar globals into one dict)
params = {'theta':theta, 'rho':rho, 'A0':A0, 'g':g, 'L0':L0, 'n':n, 'H':H, 'delta':delta, 'alpha':alpha}
# create an instance of the Python object representing the Ramsey model
# NOTE(review): this rebinds `ramsey`, which previously named the imported
# module -- reload(ramsey) will no longer work after this cell.
ramsey = ramseyModel(params)
# start the economy in steady state...
old_k_star, old_c_star = ramsey.SS_dict['k_star'], ramsey.SS_dict['c_star']
ramsey.k, ramsey.c = old_k_star, old_c_star
The first plot demonstrates the shifts in the k and c loci and the transition to the new steady state in phase space. Note that an increase in g leads to a downward shift in the Δk=0 locus and a leftward shift in the Δc=0 locus (remember these are per effective worker variables!). At the time of the shock c jumps up before falling monotonically towards its new, lower steady state value.
# Create a grid of points for plotting
gridmax, gridsize = 200, 10000
grid = np.linspace(0, gridmax, gridsize)
# Create a new plot
plt.figure(figsize=(8,8))
# Add the pre-shock c and k loci (faded via alpha=0.5)
# NOTE(review): locusK is defined elsewhere in the notebook -- presumably
# it reads the current global parameter values; confirm before rerunning
# cells out of order.
plt.plot(grid, locusK(grid), color='orange', linestyle='dashed', label=r'$\Delta k_{old}=0$', alpha=0.5)
plt.axvline(ramsey.SS_dict['k_star'], linestyle='dashed', color='k', label=r'$\Delta c_{old}=0$', alpha=0.5)
# mark the old steady state
plt.plot(ramsey.SS_dict['k_star'], ramsey.SS_dict['c_star'], marker='.', markersize=10, color='k', alpha=0.5)
# Don't forget to label your axes!
plt.xlim(0, 30)
plt.xlabel('k')
plt.ylim(0, 4)
plt.ylabel('c', rotation='horizontal')
# shock g! and create new ramseyModel object
# Copy the dict so the shock does not mutate the original `params`
# (the previous `new_params = params` aliased the same object, silently
# changing params['g'] as well -- hence the "reset old params" cell above).
# The global `g` is still rebound, so any code reading the global directly
# sees the new growth rate; if something reads the shared `params` dict
# expecting the shock, update it explicitly instead.
new_params = dict(params)
g = 0.01
new_params['g'] = g
ramsey_new = ramseyModel(new_params)
# Add the post-shock c and k loci
# NOTE(review): this is the same locusK(grid) call as the "old" locus plot
# above -- it only shows a shifted locus if locusK reads the (now updated)
# global parameters; verify.
plt.plot(grid, locusK(grid), color='orange', label=r'$\Delta k_{new}=0$')
plt.axvline(ramsey_new.SS_dict['k_star'], linestyle='solid', color='k', label=r'$\Delta c_{new}=0$')
# mark the new steady state
plt.plot(ramsey_new.SS_dict['k_star'], ramsey_new.SS_dict['c_star'], marker='.', markersize=10, color='k')
# solve for the non-linear saddle path, starting from the old steady state capital
ramsey_solution = ramsey_new.forward_shoot(old_k_star, tol=1.5e-4)
# Plot the full saddle-path
plt.plot(ramsey_solution[1][:, 0], ramsey_solution[1][:, 1], color='r', label='Saddle path')
plt.title('An increase in the growth rate of technology leads to...')
# add a legend
plt.legend(loc='best', frameon=False)
plt.savefig('Graphics/Shock to g in phase space!.png')
plt.show()
This second plot shows time paths of output per effective worker, $y$, and log output per worker, $\ln(Y/L)$. Note that $y$ falls in response to the shock, while $Y/L$ jumps up!
# Suppose that economy was in its old steady state for 40 periods prior to shock to g
y_SamplePath1 = np.repeat(f(old_k_star), 40)
# y_SamplePath2 is constructed using the optimal path of k
y_SamplePath2 = f(ramsey_solution[1][:,0])
# combine the two parts of the sample path of y
y_SamplePath = np.hstack((y_SamplePath1, y_SamplePath2))
# create a new figure
fig = plt.figure(figsize=(8,8))
# define a grid of points for plotting (ramsey_solution[2] is the path length)
grid = np.arange(0, 40 + ramsey_solution[2], 1)
# create the first subplot
ax1 = fig.add_subplot(211)
ax1.set_title(r'A positive shock technology growth rate, g, leads to a fall in y...')
ax1.plot(grid, y_SamplePath, 'r', label='y')
# NOTE(review): k_star was bound to a float earlier in the session; calling
# k_star() here will fail unless a function of that name is in scope --
# verify which binding is intended.
ax1.axhline(y=f(k_star()), xmin=0, linestyle='dashed', color='k', label=r'$y_{new}^{*}$')
ax1.set_ylabel('y')
ax1.legend(loc='best', frameon=False)
# create the sample path of Y/L as follows...
old_g = 0.005
new_g = 0.01
Y_SamplePath1 = f(old_k_star) * A0 * np.exp(old_g * grid[:40]) # initial bit grows at old tech growth rate
A40 = A0 * np.exp(old_g * grid[39]) # value of A at time of shock
Y_SamplePath2 = f(ramsey_solution[1][:,0]) * A40 * np.exp(new_g * grid[:-40]) # last bit grows at new tech growth rate
Y_perWorkerSamplePath = np.hstack((Y_SamplePath1, Y_SamplePath2))
# create a second subplot sharing the time axis with the first
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.set_title(r'but Y/L is now on a more aggressive BGP!')
ax2.plot(grid, np.log(Y_perWorkerSamplePath), 'r', label=r'$ln\frac{Y}{L}$')
# NOTE(review): plotting A0 + old_g*grid as the old BGP of ln(Y/L) is only
# an approximation (exact intercept would be ln(f(k*) * A0)) -- confirm.
ax2.plot(grid, A0 + old_g * grid,'k--', label=r'$BGP_{old}$')
ax2.set_ylabel('ln(Y/L)')
ax2.legend(loc='best', frameon=False)
# Label the time axis on the bottom subplot.  The original used
# `ax.set_xlabel`, which targeted a stale axes object left over from an
# earlier figure instead of this one.
ax2.set_xlabel('Time')
# tighten things up!
plt.tight_layout()
plt.savefig('Graphics/Samplepaths-of-output-following-shock.png')
plt.show()