We consider a one-dimensional cart position tracking problem (see Faragher, 2012).
The hidden states are the cart position $z_t$ and velocity $\dot z_t$. We can apply an external acceleration/braking force $u_t$, and the (noisy) observations are represented by $x_t$.
The equations of motion are given by
$$\begin{align*} \begin{bmatrix} z_t \\ \dot{z}_t\end{bmatrix} &= \begin{bmatrix} 1 & \Delta t \\ 0 & 1\end{bmatrix} \begin{bmatrix} z_{t-1} \\ \dot{z}_{t-1}\end{bmatrix} + \begin{bmatrix} (\Delta t)^2/2 \\ \Delta t\end{bmatrix} u_t + \mathcal{N}(0,\Sigma_z) \\ \mathbf{x}_t &= \begin{bmatrix} z_t \\ \dot{z}_t\end{bmatrix} + \mathcal{N}(0,\Sigma_x) \end{align*}$$
This is a linear Gaussian state-space model: the state $\begin{bmatrix} z_t & \dot{z}_t\end{bmatrix}^\top$ is a latent variable that 'explains' the time series $x^T$.
Note that for a generative model with the factorization $$\begin{align*} p(x^T,z^T) &= p(z_1) \prod_{t=2}^T p(z_t\,|\,z_{t-1})\, \prod_{t=1}^T p(x_t\,|\,z_t) \end{align*}$$ the following statement holds: $$p(x_t\,|\,x_{t-1},x_{t-2}) \neq p(x_t\,|\,x_{t-1})\,.$$ In other words, the latent variables $z_t$ represent a memory bank for past observations beyond $t-1$.
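One way to see this is to marginalize over the latent state (a short sketch using only the sum and product rules; $x^{t-1}$ denotes $(x_1,\ldots,x_{t-1})$):
$$\begin{align*} p(x_t \,|\, x^{t-1}) &= \int p(x_t \,|\, z_t)\, p(z_t \,|\, x^{t-1})\, \mathrm{d}z_t \\ p(z_t \,|\, x^{t-1}) &= \int p(z_t \,|\, z_{t-1})\, p(z_{t-1} \,|\, x^{t-1})\, \mathrm{d}z_{t-1} \end{align*}$$
Since the filtered posterior $p(z_{t-1}\,|\,x^{t-1})$ depends on the full observation history, $p(x_t\,|\,x^{t-1})$ does not in general reduce to $p(x_t\,|\,x_{t-1})$.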
The state-transition model $p(z_t\,|\,z_{t-1})$ is usually accompanied by an initial state distribution $p(z_1)$; for a discrete-valued (one-hot coded) state, as in a hidden Markov model, the latter is written as $\pi_k \triangleq p(z_{1k}=1)$.
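For instance, with $K=3$ possible states a valid choice would be $\pi = (0.7, 0.2, 0.1)$, i.e. $p(z_{11}=1)=0.7$, and so on. In the continuous-state cart tracking example below, the analogous role is played by a (vague) Gaussian prior over the initial state.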
For a scalar linear Gaussian state-space model $z_t = a z_{t-1} + \mathcal{N}(0,\sigma_z^2)$, $x_t = c z_t + \mathcal{N}(0,\sigma_x^2)$, the Kalman filter computes the state posterior $p(z_t\,|\,x^t) = \mathcal{N}(z_t\,|\,\mu_t,\sigma_t^2)$ recursively, with $$\begin{align*} \rho_t^2 &= \sigma_z^2 + a^2 \sigma_{t-1}^2 \tag{predicted variance}\\ K_t &= \frac{c \rho_t^2}{c^2 \rho_t^2 + \sigma_x^2} \tag{'Kalman gain'} \\ \mu_t &= a \mu_{t-1} + K_t \cdot \left( x_t - c a \mu_{t-1}\right) \tag{posterior mean}\\ \sigma_t^2 &= \left( 1 - c K_t \right) \rho_t^2 \tag{posterior variance} \end{align*}$$
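As a minimal illustration of these equations, one scalar Kalman filter step can be written in Julia as follows. This snippet is an addition; the function name scalarKalmanStep and the default parameter values are illustrative assumptions, not part of the lesson's helper code.
# One scalar Kalman filter step: (μ_{t-1}, σ²_{t-1}) and observation x_t -> (μ_t, σ²_t)
function scalarKalmanStep(μ_prev, σ2_prev, x_t; a=1.0, c=1.0, σ2_z=0.1, σ2_x=1.0)
    ρ2 = σ2_z + a^2 * σ2_prev                     # predicted (prior) variance
    K  = c * ρ2 / (c^2 * ρ2 + σ2_x)               # Kalman gain
    μ  = a * μ_prev + K * (x_t - c * a * μ_prev)  # posterior mean
    σ2 = (1 - c*K) * ρ2                           # posterior variance
    return μ, σ2
end
For example, scalarKalmanStep(0.0, 1e8, 2.5) performs a single update starting from a vague prior.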
We can solve the cart tracking problem by sum-product message passing in a factor graph representation of the model. All we have to do is create factor nodes for the state-transition model $p(z_t|z_{t-1})$ and the observation model $p(x_t|z_t)$; for a linear Gaussian model, the sum-product messages reduce to the Kalman filter prediction and update equations. A factor graph toolbox such as ForneyLab can automate this message passing; below we implement the resulting Kalman filter updates directly.
We'll implement the following model:
$$\begin{align*} \begin{bmatrix} z_t \\ \dot{z}_t\end{bmatrix} &= \begin{bmatrix} 1 & \Delta t \\ 0 & 1\end{bmatrix} \begin{bmatrix} z_{t-1} \\ \dot{z}_{t-1}\end{bmatrix} + \begin{bmatrix} (\Delta t)^2/2 \\ \Delta t\end{bmatrix} u_t + \mathcal{N}(0,\Sigma_z) \\ \mathbf{x}_t &= \begin{bmatrix} z_t \\ \dot{z}_t\end{bmatrix} + \mathcal{N}(0,\Sigma_x) \end{align*}$$
using LinearAlgebra, PyPlot
include("scripts/cart_tracking_helpers.jl")
# Specify the model parameters
Δt = 1.0 # assume the time steps to be equal in size
A = [1.0 Δt;
0.0 1.0]
b = [0.5*Δt^2; Δt]
Σz = convert(Matrix,Diagonal([0.2*Δt; 0.1*Δt])) # process noise covariance
Σx = convert(Matrix,Diagonal([1.0; 2.0])); # observation noise covariance
# Generate noisy observations
n = 10 # perform 10 timesteps
z_start = [10.0; 2.0] # initial state
u = 0.2 * ones(n) # constant input u
noisy_x = generateNoisyMeasurements(z_start, u, A, b, Σz, Σx);
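The helper functions live in scripts/cart_tracking_helpers.jl, which is not shown here. As a rough sketch of what generateNoisyMeasurements presumably does (an assumption about the helper, not its actual code), it simulates the state-space model forward in time and adds Gaussian observation noise:
# Hypothetical sketch of the data-generating helper (the real one is in cart_tracking_helpers.jl)
function generateNoisyMeasurementsSketch(z_start, u, A, b, Σz, Σx)
    sample_gaussian(Σ) = cholesky(Σ).L * randn(size(Σ,1))  # draw a sample from N(0, Σ)
    z = copy(z_start)
    x = Vector{Vector{Float64}}(undef, length(u))
    for t in 1:length(u)
        z = A*z + b*u[t] + sample_gaussian(Σz)  # state transition with process noise
        x[t] = z + sample_gaussian(Σx)          # noisy observation of the full state
    end
    return x
end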
control = 0.2 # known control input (same constant as u used to generate the data)
global t = 0
global m_pred_z = fill(Vector{Float64}(undef,2),n,1) #predictive means of states
global V_pred_z = fill(Matrix{Float64}(undef,2,2),n,1) #predictive covariance of states
global m_z = fill(Vector{Float64}(undef,2),n,1) # posterior means of states
global V_z = fill(Matrix{Float64}(undef,2,2),n,1) #posterior covariance of states
function performKalmanStep()
    global t += 1
    if t > 1
        # predict
        m_pred_z[t] = A*m_z[t-1] + control*b            # predictive mean
        V_pred_z[t] = A*V_z[t-1]*transpose(A) + Σz      # predictive covariance
        # update
        residual = noisy_x[t] - m_pred_z[t]             # error between observation and prediction
        gain = V_pred_z[t]*inv(V_pred_z[t] + Σx)        # Kalman gain (observation model is the identity)
        m_z[t] = m_pred_z[t] + gain*residual            # posterior mean update
        V_z[t] = (I - gain)*V_pred_z[t]                 # posterior covariance update
    elseif t == 1
        # predict from a vague prior on the initial state (zero mean, very large covariance)
        m_pred_z[t] = A*[0.0; 0.0] + control*b          # predictive mean
        V_pred_z[t] = A*(1e8*I)*transpose(A) + Σz       # predictive covariance
        # update
        residual = noisy_x[t] - m_pred_z[t]             # error between observation and prediction
        gain = V_pred_z[t]*inv(V_pred_z[t] + Σx)        # Kalman gain
        m_z[t] = m_pred_z[t] + gain*residual            # posterior mean update
        V_z[t] = (I - gain)*V_pred_z[t]                 # posterior covariance update
    end
end
while t < n
performKalmanStep()
end
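Before plotting, it can be useful to compare the final filtered estimate with the last noisy measurement. This small check is an addition, not part of the original script:
println("last noisy measurement : ", noisy_x[n])
println("filtered position mean : ", m_z[n][1], " (variance ", V_z[n][1,1], ")")
println("filtered velocity mean : ", m_z[n][2], " (variance ", V_z[n][2,2], ")")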
plotCartPrediction2(m_pred_z[n][1], V_pred_z[n][1], m_z[n][1], V_z[n][1], noisy_x[n][1], Σx[1,1]);
open("../../styles/aipstyle.html") do f display("text/html", read(f, String)) end