Clean up LQR planner

This commit is contained in:
Atsushi Sakai
2019-07-28 15:04:15 +09:00
parent 8061e73337
commit 86b80d55c3
2 changed files with 76 additions and 73 deletions

View File

@@ -15,105 +15,104 @@ import scipy.linalg as la
SHOW_ANIMATION = True
MAX_TIME = 100.0 # Maximum simulation time
DT = 0.1 # Time tick
class LQRPlanner:
def LQRplanning(sx, sy, gx, gy, show_animation=SHOW_ANIMATION):
def __init__(self):
self.MAX_TIME = 100.0 # Maximum simulation time
self.DT = 0.1 # Time tick
self.GOAL_DIST = 0.1
self.MAX_ITER = 150
self.EPS = 0.01
rx, ry = [sx], [sy]
def lqr_planning(self, sx, sy, gx, gy, show_animation=SHOW_ANIMATION):
x = np.array([sx - gx, sy - gy]).reshape(2, 1) # State vector
rx, ry = [sx], [sy]
# Linear system model
A, B = get_system_model()
x = np.array([sx - gx, sy - gy]).reshape(2, 1) # State vector
found_path = False
# Linear system model
A, B = self.get_system_model()
time = 0.0
while time <= MAX_TIME:
time += DT
found_path = False
u = LQR_control(A, B, x)
time = 0.0
while time <= self.MAX_TIME:
time += self.DT
x = A @ x + B @ u
u = self.lqr_control(A, B, x)
rx.append(x[0, 0] + gx)
ry.append(x[1, 0] + gy)
x = A @ x + B @ u
d = math.sqrt((gx - rx[-1])**2 + (gy - ry[-1])**2)
if d <= 0.1:
# print("Goal!!")
found_path = True
break
rx.append(x[0, 0] + gx)
ry.append(x[1, 0] + gy)
# animation
if show_animation: # pragma: no cover
plt.plot(sx, sy, "or")
plt.plot(gx, gy, "ob")
plt.plot(rx, ry, "-r")
plt.axis("equal")
plt.pause(1.0)
d = math.sqrt((gx - rx[-1]) ** 2 + (gy - ry[-1]) ** 2)
if d <= self.GOAL_DIST:
found_path = True
break
if not found_path:
print("Cannot found path")
return [], []
# animation
if show_animation: # pragma: no cover
plt.plot(sx, sy, "or")
plt.plot(gx, gy, "ob")
plt.plot(rx, ry, "-r")
plt.axis("equal")
plt.pause(1.0)
return rx, ry
if not found_path:
print("Cannot found path")
return [], []
return rx, ry
def solve_DARE(A, B, Q, R):
"""
solve a discrete time_Algebraic Riccati equation (DARE)
"""
X = Q
maxiter = 150
eps = 0.01
def solve_dare(self, A, B, Q, R):
"""
solve a discrete time_Algebraic Riccati equation (DARE)
"""
X, Xn = Q, Q
for i in range(maxiter):
Xn = A.T * X * A - A.T * X * B * \
la.inv(R + B.T * X * B) * B.T * X * A + Q
if (abs(Xn - X)).max() < eps:
break
X = Xn
for i in range(self.MAX_ITER):
Xn = A.T * X * A - A.T * X * B * \
la.inv(R + B.T * X * B) * B.T * X * A + Q
if (abs(Xn - X)).max() < self.EPS:
break
X = Xn
return Xn
return Xn
def dlqr(self, A, B, Q, R):
"""Solve the discrete time lqr controller.
x[k+1] = A x[k] + B u[k]
cost = sum x[k].T*Q*x[k] + u[k].T*R*u[k]
# ref Bertsekas, p.151
"""
def dlqr(A, B, Q, R):
"""Solve the discrete time lqr controller.
x[k+1] = A x[k] + B u[k]
cost = sum x[k].T*Q*x[k] + u[k].T*R*u[k]
# ref Bertsekas, p.151
"""
# first, try to solve the ricatti equation
X = self.solve_dare(A, B, Q, R)
# first, try to solve the ricatti equation
X = solve_DARE(A, B, Q, R)
# compute the LQR gain
K = la.inv(B.T @ X @ B + R) @ (B.T @ X @ A)
# compute the LQR gain
K = la.inv(B.T @ X @ B + R) @ (B.T @ X @ A)
eigValues = la.eigvals(A - B @ K)
eigVals, eigVecs = la.eig(A - B @ K)
return K, X, eigValues
return K, X, eigVals
def get_system_model(self):
A = np.array([[self.DT, 1.0],
[0.0, self.DT]])
B = np.array([0.0, 1.0]).reshape(2, 1)
def get_system_model():
return A, B
A = np.array([[DT, 1.0],
[0.0, DT]])
B = np.array([0.0, 1.0]).reshape(2, 1)
def lqr_control(self, A, B, x):
return A, B
Kopt, X, ev = self.dlqr(A, B, np.eye(2), np.eye(1))
u = -Kopt @ x
def LQR_control(A, B, x):
Kopt, X, ev = dlqr(A, B, np.eye(2), np.eye(1))
u = -Kopt @ x
return u
return u
def main():
@@ -122,13 +121,15 @@ def main():
ntest = 10 # number of goal
area = 100.0 # sampling area
lqr_planner = LQRPlanner()
for i in range(ntest):
sx = 6.0
sy = 6.0
gx = random.uniform(-area, area)
gy = random.uniform(-area, area)
rx, ry = LQRplanning(sx, sy, gx, gy)
rx, ry = lqr_planner.lqr_planning(sx, sy, gx, gy)
if SHOW_ANIMATION: # pragma: no cover
plt.plot(sx, sy, "or")

View File

@@ -18,7 +18,7 @@ sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../LQRPlanner/")
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../RRTStar/")
try:
import LQRplanner
from LQRplanner import LQRPlanner
from rrt_star import RRTStar
except ImportError:
raise
@@ -59,6 +59,8 @@ class LQRRRTStar(RRTStar):
self.goal_xy_th = 0.5
self.step_size = step_size
self.lqr_planner = LQRPlanner()
def planning(self, animation=True, search_until_max_iter=True):
"""
RRT Star planning
@@ -131,7 +133,7 @@ class LQRRRTStar(RRTStar):
def calc_new_cost(self, from_node, to_node):
wx, wy = LQRplanner.LQRplanning(
wx, wy = self.lqr_planner.lqr_planning(
from_node.x, from_node.y, to_node.x, to_node.y, show_animation=False)
px, py, course_lengths = self.sample_path(wx, wy, self.step_size)
@@ -182,7 +184,7 @@ class LQRRRTStar(RRTStar):
def steer(self, from_node, to_node):
wx, wy = LQRplanner.LQRplanning(
wx, wy = self.lqr_planner.lqr_planning(
from_node.x, from_node.y, to_node.x, to_node.y, show_animation=False)
px, py, course_lens = self.sample_path(wx, wy, self.step_size)