%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import math
def euler(func, initial, deltas):
    """Integrate an ODE system with the forward Euler method.

    Parameters
    ----------
    func : callable
        Derivative function ``func(u, t)`` returning du/dt as a sequence
        (or scalar) matching the shape of the state.
    initial : sequence
        State vector at the first time point.
    deltas : sequence of float
        Increasing time points at which to compute the solution.

    Returns
    -------
    numpy.ndarray
        One row of state per time point; the first row is ``initial``.
    """
    ys = [initial]
    # Fix: iterate over the `deltas` parameter, not the global `xs` the
    # original code accidentally closed over.
    for i in range(1, len(deltas)):
        cur = np.array(ys[-1][:])
        dt = deltas[i] - deltas[i - 1]
        # Forward Euler evaluates the derivative at the *left* endpoint,
        # (y_n, t_n); the original used t_{n+1} with y_n.
        cur = cur + np.array(func(cur, deltas[i - 1])) * dt
        ys.append(cur)
    return np.array(ys)
We left off last time after revising our code to separate the differential equation from the numerical method. Above is our Euler's method code; below, we test it against SciPy's built-in odeint function.
from scipy.integrate import odeint
def growth(u, t):
    """Derivative for exponential growth, du/dt = 0.1 * u.

    ``u`` is the state vector (only ``u[0]`` is used); ``t`` is ignored
    because the equation is autonomous.
    """
    rate = 0.1
    return rate * u[0]
def f(k, r, t):
    """Exact solution of du/dt = r*u with u(0) = k, i.e. k * e^(r*t).

    ``t`` may be a scalar or a NumPy array (the notebook passes ``xs``
    when plotting), so use the vectorized ``np.exp`` rather than the
    less accurate ``math.e ** x`` power form.
    """
    return k * np.exp(r * t)
# Integrate du/dt = 0.1*u with u(0) = 4 over t in [0, 20] (20 points),
# then plot Euler, SciPy's odeint, and the analytic solution together.
init = [4]
xs = np.linspace(0, 20, 20)
ys = euler(growth, init, xs)    # our hand-rolled Euler solution
ybs = odeint(growth, init, xs)  # SciPy reference integrator
plt.plot(xs, ys[:, 0])
plt.plot(xs, ybs[:, 0])
plt.plot(xs, f(4, 0.1, xs))     # exact solution k * e^(r*t)
[<matplotlib.lines.Line2D at 0x10a420e50>]
How close can we get Euler's method to the true solution? If we decrease the step size, we get closer. The error shrinks linearly with the step size, $O(dt)$: if we halve the step size, we halve the error of the solution.
# Halve dt repeatedly and track the end-point error of Euler's method.
# The last printed column (newerr / old) should approach 0.5, confirming
# the O(dt) convergence claimed above.
old = 1
for i in range(20):
    dt = 10.0 / 2 ** i
    end = 20
    # NOTE(review): linspace with int(end / dt) points gives an actual step
    # of end / (int(end / dt) - 1), slightly larger than dt — the printed dt
    # is only nominal. int(end / dt) + 1 points would make the step exact.
    xs = np.linspace(0, end, int(end / dt))
    ys = euler(growth, init, xs)
    # absolute error at the final time versus the analytic solution
    newerr = abs(ys[-1][0] - f(4, 0.1, end))
    print("%f \t%f\t%f\t%f\t%f\t" % \
        (dt, ys[-1][0], f(4, 0.1, end), newerr, newerr / old))
    old = newerr
10.000000 12.000000 29.556224 17.556224 17.556224 5.000000 18.518519 29.556224 11.037706 0.628706 2.500000 23.231180 29.556224 6.325044 0.573040 1.250000 26.147184 29.556224 3.409040 0.538975 0.625000 27.783031 29.556224 1.773194 0.520145 0.312500 28.651466 29.556224 0.904759 0.510242 0.156250 29.099172 29.556224 0.457052 0.505165 0.078125 29.326513 29.556224 0.229711 0.502593 0.039062 29.441070 29.556224 0.115154 0.501299 0.019531 29.498572 29.556224 0.057652 0.500650 0.009766 29.527380 29.556224 0.028845 0.500325 0.004883 29.541797 29.556224 0.014427 0.500163 0.002441 29.549010 29.556224 0.007215 0.500081 0.001221 29.552617 29.556224 0.003608 0.500041 0.000610 29.554421 29.556224 0.001804 0.500020 0.000305 29.555322 29.556224 0.000902 0.500010 0.000153 29.555773 29.556224 0.000451 0.500005 0.000076 29.555999 29.556224 0.000225 0.500003 0.000038 29.556112 29.556224 0.000113 0.500001 0.000019 29.556168 29.556224 0.000056 0.500001
We can do better than this. We use an approach called 2nd order Runge-Kutta (RK2) to improve on Euler's estimate with a correction based on the derivative evaluated at the predicted end point of the step.
def rk2(func, initial, deltas):
    """Integrate an ODE system with 2nd-order Runge-Kutta (Heun's method).

    Parameters
    ----------
    func : callable
        Derivative function ``func(u, t)`` returning du/dt.
    initial : sequence
        State vector at the first time point.
    deltas : sequence of float
        Increasing time points at which to compute the solution.

    Returns
    -------
    numpy.ndarray
        One row of state per time point; the first row is ``initial``.
    """
    ys = [initial]
    # Fix: use the `deltas` parameter, not the global `xs`.
    for i in range(1, len(deltas)):
        old = np.array(ys[-1][:])
        t0 = deltas[i - 1]
        dt = deltas[i] - t0
        # Slope at the left endpoint (the original evaluated this at
        # t_{i} instead of t_{i-1}, and then recomputed it a second time
        # for the Euler predictor step).
        d1 = np.array(func(old, t0))
        predictor = old + d1 * dt               # Euler predictor
        d2 = np.array(func(predictor, t0 + dt)) # slope at the right endpoint
        better = old + ((d1 + d2) / 2.0) * dt   # trapezoidal corrector
        ys.append(better)
    return np.array(ys)
# Re-run the comparison on a deliberately coarse grid (5 points over
# [0, 20]) so the gap between Euler, RK2, and odeint is visible.
init = [4]
xs = np.linspace(0, 20, 5)
ys = euler(growth, init, xs)   # 1st-order method
ybs = rk2(growth, init, xs)    # 2nd-order method
ycs = odeint(growth, init, xs) # SciPy reference
plt.plot(xs, ys[:, 0])
plt.plot(xs, ybs[:, 0])
plt.plot(xs, ycs[:, 0])
[<matplotlib.lines.Line2D at 0x10abc9ed0>]
And we can see how the error changes as we decrease the value of dt. RK2 is on the order of $O(dt^2)$.
# Same convergence study as before, now for RK2: the ratio column should
# approach 0.25, confirming O(dt^2) convergence.
old = 1
for i in range(20):
    dt = 10.0 / 2 ** i
    end = 20
    # NOTE(review): as above, int(end / dt) points makes the actual step
    # slightly larger than the printed (nominal) dt.
    xs = np.linspace(0, end, int(end / dt))
    ys = rk2(growth, init, xs)
    # absolute error at the final time versus the analytic solution
    newerr = abs(ys[-1][0] - f(4, 0.1, end))
    print("%f \t%f\t%f\t%f\t%f\t" % \
        (dt, ys[-1][0], f(4, 0.1, end), newerr, newerr / old))
    old = newerr
10.000000 20.000000 29.556224 9.556224 9.556224 5.000000 26.957476 29.556224 2.598748 0.271943 2.500000 28.912186 29.556224 0.644038 0.247826 1.250000 29.398087 29.556224 0.158137 0.245540 0.625000 29.517176 29.556224 0.039049 0.246929 0.312500 29.546530 29.556224 0.009694 0.248255 0.156250 29.553810 29.556224 0.002415 0.249075 0.078125 29.555622 29.556224 0.000602 0.249525 0.039062 29.556074 29.556224 0.000150 0.249759 0.019531 29.556187 29.556224 0.000038 0.249879 0.009766 29.556215 29.556224 0.000009 0.249939 0.004883 29.556222 29.556224 0.000002 0.249970 0.002441 29.556224 29.556224 0.000001 0.249985 0.001221 29.556224 29.556224 0.000000 0.249993 0.000610 29.556224 29.556224 0.000000 0.249998 0.000305 29.556224 29.556224 0.000000 0.249986 0.000153 29.556224 29.556224 0.000000 0.249982 0.000076 29.556224 29.556224 0.000000 0.250236 0.000038 29.556224 29.556224 0.000000 0.248044 0.000019 29.556224 29.556224 0.000000 0.247504
This is good! We can do better by continuing this approach. 4th Order Runge-Kutta (RK4) additionally estimates the derivative at the midpoint of the step, using the earlier estimates to pick the state at which to evaluate it.
def rk4(func, initial, deltas):
    """Integrate an ODE system with the classic 4th-order Runge-Kutta method.

    Parameters
    ----------
    func : callable
        Derivative function ``func(u, t)`` returning du/dt.
    initial : sequence
        State vector at the first time point.
    deltas : sequence of float
        Increasing time points at which to compute the solution.

    Returns
    -------
    numpy.ndarray
        One row of state per time point; the first row is ``initial``.
    """
    ys = [initial]
    # Fix: use the `deltas` parameter, not the global `xs`.
    for i in range(1, len(deltas)):
        old = np.array(ys[-1][:])
        t0 = deltas[i - 1]
        dt = deltas[i] - t0
        # The four slope samples: left endpoint, two midpoints, right
        # endpoint. The original anchored all times at t_{i} instead of
        # t_{i-1}, shifting every evaluation one step to the right.
        d1 = np.array(func(old, t0)) * dt
        d2 = np.array(func(old + 0.5 * d1, t0 + 0.5 * dt)) * dt
        d3 = np.array(func(old + 0.5 * d2, t0 + 0.5 * dt)) * dt
        d4 = np.array(func(old + d3, t0 + dt)) * dt
        # Simpson-style weighted average of the four slopes.
        better = old + ((d1 + 2 * d2 + 2 * d3 + d4) / 6.0)
        ys.append(better)
    return np.array(ys)
# Convergence study for RK4. The ratio column should approach 1/16 = 0.0625
# (O(dt^4)) until floating-point round-off dominates, at which point the
# printed ratios become noise.
old = 1
for i in range(20):
    dt = 10.0 / 2 ** i
    end = 20
    # NOTE(review): as above, int(end / dt) points makes the actual step
    # slightly larger than the printed (nominal) dt.
    xs = np.linspace(0, end, int(end / dt))
    ys = rk4(growth, init, xs)
    # absolute error at the final time versus the analytic solution
    newerr = abs(ys[-1][0] - f(4, 0.1, end))
    print("%f \t%f\t%f\t%f\t%f\t" % \
        (dt, ys[-1][0], f(4, 0.1, end), newerr, newerr / old))
    old = newerr
10.000000 28.000000 29.556224 1.556224 1.556224 5.000000 29.500175 29.556224 0.056049 0.036016 2.500000 29.553635 29.556224 0.002589 0.046195 1.250000 29.556085 29.556224 0.000139 0.053816 0.625000 29.556216 29.556224 0.000008 0.058045 0.312500 29.556224 29.556224 0.000000 0.060246 0.156250 29.556224 29.556224 0.000000 0.061367 0.078125 29.556224 29.556224 0.000000 0.061932 0.039062 29.556224 29.556224 0.000000 0.062201 0.019531 29.556224 29.556224 0.000000 0.062086 0.009766 29.556224 29.556224 0.000000 0.041729 0.004883 29.556224 29.556224 0.000000 0.285714 0.002441 29.556224 29.556224 0.000000 0.041667 0.001221 29.556224 29.556224 0.000000 73.000000 0.000610 29.556224 29.556224 0.000000 0.808219 0.000305 29.556224 29.556224 0.000000 1.440678 0.000153 29.556224 29.556224 0.000000 0.270588 0.000076 29.556224 29.556224 0.000000 1.434783 0.000038 29.556224 29.556224 0.000000 4.272727 0.000019 29.556224 29.556224 0.000000 2.290780
This is great! RK4 has error of order $O(dt^4)$. It needs a dt of only 0.625 to reach the same error that Euler's method required a dt of 0.000019 to achieve. Even though RK4 involves more calculations per step, its much better accuracy makes it widely used across simulation software.