mirror of
https://github.com/AtsushiSakai/PythonRobotics.git
synced 2026-04-22 03:00:41 -04:00
fix cal_covariance func and clean up code and added jupyter doc
This commit is contained in:
62
Localization/particle_filter/particle_filter.ipynb
Normal file
62
Localization/particle_filter/particle_filter.ipynb
Normal file
@@ -0,0 +1,62 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"collapsed": true,
|
||||
"pycharm": {
|
||||
"name": "#%% md\n"
|
||||
}
|
||||
},
|
||||
"source": [
|
||||
"# Particle Filter\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"source": [
|
||||
"## How to calculate covariance matrix from particles\n",
|
||||
"\n",
|
||||
"$\\Xi_{j,k}=\\frac{1}{1-\\sum^N_{i=1}(w^i)^2}\\sum^N_{i=1}w^i(x^i_j-\\mu_j)(x^i_k-\\mu_k)$\n",
|
||||
"\n",
|
||||
"Ref:\n",
|
||||
"\n",
|
||||
"- [Improving the particle filter in high dimensions using conjugate artificial process noise](https://arxiv.org/abs/1801.07000)\n"
|
||||
],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 2
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython2",
|
||||
"version": "2.7.6"
|
||||
},
|
||||
"pycharm": {
|
||||
"stem_cell": {
|
||||
"cell_type": "raw",
|
||||
"source": [],
|
||||
"metadata": {
|
||||
"collapsed": false
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 0
|
||||
}
|
||||
@@ -37,20 +37,20 @@ def calc_input():
|
||||
return u
|
||||
|
||||
|
||||
def observation(xTrue, xd, u, RF_ID):
|
||||
xTrue = motion_model(xTrue, u)
|
||||
def observation(x_true, xd, u, rf_id):
|
||||
x_true = motion_model(x_true, u)
|
||||
|
||||
# add noise to gps x-y
|
||||
z = np.zeros((0, 3))
|
||||
|
||||
for i in range(len(RF_ID[:, 0])):
|
||||
for i in range(len(rf_id[:, 0])):
|
||||
|
||||
dx = xTrue[0, 0] - RF_ID[i, 0]
|
||||
dy = xTrue[1, 0] - RF_ID[i, 1]
|
||||
dx = x_true[0, 0] - rf_id[i, 0]
|
||||
dy = x_true[1, 0] - rf_id[i, 1]
|
||||
d = math.hypot(dx, dy)
|
||||
if d <= MAX_RANGE:
|
||||
dn = d + np.random.randn() * Q_sim[0, 0] ** 0.5 # add noise
|
||||
zi = np.array([[dn, RF_ID[i, 0], RF_ID[i, 1]]])
|
||||
zi = np.array([[dn, rf_id[i, 0], rf_id[i, 1]]])
|
||||
z = np.vstack((z, zi))
|
||||
|
||||
# add noise to input
|
||||
@@ -60,7 +60,7 @@ def observation(xTrue, xd, u, RF_ID):
|
||||
|
||||
xd = motion_model(xd, ud)
|
||||
|
||||
return xTrue, z, xd, ud
|
||||
return x_true, z, xd, ud
|
||||
|
||||
|
||||
def motion_model(x, u):
|
||||
@@ -86,13 +86,17 @@ def gauss_likelihood(x, sigma):
|
||||
return p
|
||||
|
||||
|
||||
def calc_covariance(xEst, px, pw):
|
||||
def calc_covariance(x_est, px, pw):
|
||||
"""
|
||||
calculate covariance matrix
|
||||
see ipynb doc
|
||||
"""
|
||||
cov = np.zeros((3, 3))
|
||||
|
||||
for i in range(px.shape[1]):
|
||||
dx = (px[:, i] - xEst)[0:3]
|
||||
cov += pw[0, i] * dx.dot(dx.T)
|
||||
cov /= NP
|
||||
n_particle = px.shape[1]
|
||||
for i in range(n_particle):
|
||||
dx = (px[:, i:i + 1] - x_est)[0:3]
|
||||
cov += pw[0, i] * dx @ dx.T
|
||||
cov *= 1.0 / (1.0 - pw @ pw.T)
|
||||
|
||||
return cov
|
||||
|
||||
@@ -125,13 +129,13 @@ def pf_localization(px, pw, z, u):
|
||||
|
||||
pw = pw / pw.sum() # normalize
|
||||
|
||||
xEst = px.dot(pw.T)
|
||||
PEst = calc_covariance(xEst, px, pw)
|
||||
x_est = px.dot(pw.T)
|
||||
p_est = calc_covariance(x_est, px, pw)
|
||||
|
||||
N_eff = 1.0 / (pw.dot(pw.T))[0, 0] # Effective particle number
|
||||
if N_eff < NTh:
|
||||
px, pw = re_sampling(px, pw)
|
||||
return xEst, PEst, px, pw
|
||||
return x_est, p_est, px, pw
|
||||
|
||||
|
||||
def re_sampling(px, pw):
|
||||
@@ -140,8 +144,8 @@ def re_sampling(px, pw):
|
||||
"""
|
||||
|
||||
w_cum = np.cumsum(pw)
|
||||
base = np.arange(0.0, 1.0, 1/NP)
|
||||
re_sample_id = base + np.random.uniform(0, 1/NP)
|
||||
base = np.arange(0.0, 1.0, 1 / NP)
|
||||
re_sample_id = base + np.random.uniform(0, 1 / NP)
|
||||
indexes = []
|
||||
ind = 0
|
||||
for ip in range(NP):
|
||||
@@ -155,9 +159,9 @@ def re_sampling(px, pw):
|
||||
return px, pw
|
||||
|
||||
|
||||
def plot_covariance_ellipse(xEst, PEst): # pragma: no cover
|
||||
Pxy = PEst[0:2, 0:2]
|
||||
eig_val, eig_vec = np.linalg.eig(Pxy)
|
||||
def plot_covariance_ellipse(x_est, p_est): # pragma: no cover
|
||||
p_xy = p_est[0:2, 0:2]
|
||||
eig_val, eig_vec = np.linalg.eig(p_xy)
|
||||
|
||||
if eig_val[0] >= eig_val[1]:
|
||||
big_ind = 0
|
||||
@@ -186,8 +190,8 @@ def plot_covariance_ellipse(xEst, PEst): # pragma: no cover
|
||||
Rot = np.array([[math.cos(angle), -math.sin(angle)],
|
||||
[math.sin(angle), math.cos(angle)]])
|
||||
fx = Rot.dot(np.array([[x, y]]))
|
||||
px = np.array(fx[0, :] + xEst[0, 0]).flatten()
|
||||
py = np.array(fx[1, :] + xEst[1, 0]).flatten()
|
||||
px = np.array(fx[0, :] + x_est[0, 0]).flatten()
|
||||
py = np.array(fx[1, :] + x_est[1, 0]).flatten()
|
||||
plt.plot(px, py, "--r")
|
||||
|
||||
|
||||
@@ -197,54 +201,54 @@ def main():
|
||||
time = 0.0
|
||||
|
||||
# RF_ID positions [x, y]
|
||||
RFi_ID = np.array([[10.0, 0.0],
|
||||
[10.0, 10.0],
|
||||
[0.0, 15.0],
|
||||
[-5.0, 20.0]])
|
||||
rf_id = np.array([[10.0, 0.0],
|
||||
[10.0, 10.0],
|
||||
[0.0, 15.0],
|
||||
[-5.0, 20.0]])
|
||||
|
||||
# State Vector [x y yaw v]'
|
||||
xEst = np.zeros((4, 1))
|
||||
xTrue = np.zeros((4, 1))
|
||||
x_est = np.zeros((4, 1))
|
||||
x_true = np.zeros((4, 1))
|
||||
|
||||
px = np.zeros((4, NP)) # Particle store
|
||||
pw = np.zeros((1, NP)) + 1.0 / NP # Particle weight
|
||||
xDR = np.zeros((4, 1)) # Dead reckoning
|
||||
x_dr = np.zeros((4, 1)) # Dead reckoning
|
||||
|
||||
# history
|
||||
hxEst = xEst
|
||||
hxTrue = xTrue
|
||||
hxDR = xTrue
|
||||
h_x_est = x_est
|
||||
h_x_true = x_true
|
||||
h_x_dr = x_true
|
||||
|
||||
while SIM_TIME >= time:
|
||||
time += DT
|
||||
u = calc_input()
|
||||
|
||||
xTrue, z, xDR, ud = observation(xTrue, xDR, u, RFi_ID)
|
||||
x_true, z, x_dr, ud = observation(x_true, x_dr, u, rf_id)
|
||||
|
||||
xEst, PEst, px, pw = pf_localization(px, pw, z, ud)
|
||||
x_est, PEst, px, pw = pf_localization(px, pw, z, ud)
|
||||
|
||||
# store data history
|
||||
hxEst = np.hstack((hxEst, xEst))
|
||||
hxDR = np.hstack((hxDR, xDR))
|
||||
hxTrue = np.hstack((hxTrue, xTrue))
|
||||
h_x_est = np.hstack((h_x_est, x_est))
|
||||
h_x_dr = np.hstack((h_x_dr, x_dr))
|
||||
h_x_true = np.hstack((h_x_true, x_true))
|
||||
|
||||
if show_animation:
|
||||
plt.cla()
|
||||
# for stopping simulation with the esc key.
|
||||
plt.gcf().canvas.mpl_connect('key_release_event',
|
||||
lambda event: [exit(0) if event.key == 'escape' else None])
|
||||
lambda event: [exit(0) if event.key == 'escape' else None])
|
||||
|
||||
for i in range(len(z[:, 0])):
|
||||
plt.plot([xTrue[0, 0], z[i, 1]], [xTrue[1, 0], z[i, 2]], "-k")
|
||||
plt.plot(RFi_ID[:, 0], RFi_ID[:, 1], "*k")
|
||||
plt.plot([x_true[0, 0], z[i, 1]], [x_true[1, 0], z[i, 2]], "-k")
|
||||
plt.plot(rf_id[:, 0], rf_id[:, 1], "*k")
|
||||
plt.plot(px[0, :], px[1, :], ".r")
|
||||
plt.plot(np.array(hxTrue[0, :]).flatten(),
|
||||
np.array(hxTrue[1, :]).flatten(), "-b")
|
||||
plt.plot(np.array(hxDR[0, :]).flatten(),
|
||||
np.array(hxDR[1, :]).flatten(), "-k")
|
||||
plt.plot(np.array(hxEst[0, :]).flatten(),
|
||||
np.array(hxEst[1, :]).flatten(), "-r")
|
||||
plot_covariance_ellipse(xEst, PEst)
|
||||
plt.plot(np.array(h_x_true[0, :]).flatten(),
|
||||
np.array(h_x_true[1, :]).flatten(), "-b")
|
||||
plt.plot(np.array(h_x_dr[0, :]).flatten(),
|
||||
np.array(h_x_dr[1, :]).flatten(), "-k")
|
||||
plt.plot(np.array(h_x_est[0, :]).flatten(),
|
||||
np.array(h_x_est[1, :]).flatten(), "-r")
|
||||
plot_covariance_ellipse(x_est, PEst)
|
||||
plt.axis("equal")
|
||||
plt.grid(True)
|
||||
plt.pause(0.001)
|
||||
|
||||
Reference in New Issue
Block a user