# define imports
import math
import matplotlib.pyplot as plt
import numpy as np

# define constants
ROBOT_WIDTH = np.nan  # distance between the wheels [m]; measure and fill in
TOTAL_TIME = 1000  # total simulation time [s]
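
# noise parameters -- assumed placeholder values, since np.var() of a single
# scalar is always 0 and would make every noise matrix zero; tune these on
# the real robot
SIGMA_L = 0.01          # stddev of left wheel movement per step [m]
SIGMA_R = 0.01          # stddev of right wheel movement per step [m]
SIGMA_RANGE = 0.1       # stddev of a measured landmark distance [m]
SIGMA_BEARING = 0.03    # stddev of a measured landmark bearing [rad]
SIGMA_LANDMARK = 1.0    # stddev of a newly added landmark position [m]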

# your arbitrary start position
X_INIT = np.array([0.0, 0.0, 0.5*np.pi])  # [x, y, theta]
MU = X_INIT.copy()
COVARIANCE = np.zeros((3, 3))


def get_motor_movement():
    # return the wheel movements (l, r) for this timestep
    # (to be implemented for the actual robot hardware)
    pass


def camera_detections(camera_coords):
    # get landmarks from camera image and return a list
    # where each item is a landmark observation:
    # [[actual_measurement, world_coords, camera_coords, id], ...]
    pass
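

# a minimal dummy stand-in for offline testing could look like this
# (all values below are assumed, not real sensor data):
#
#   def camera_detections(camera_coords):
#       # one landmark: r = 1.0 m, alpha = 0.1 rad, at world (2, 3), id 7
#       return [[[1.0, 0.1], [2.0, 3.0], [0.5, 0.2], 7]]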


class EKF_SLAM:

    def __init__(self):
        # initialize the mu and covariance with the defined global constants
        self.mu = MU
        self.cov = COVARIANCE
        # create an empty landmark map where landmark id and count will be stored as key-value pairs
        self.landmark_map = dict()
        self.total_landmarks = 0
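        # state layout: mu = [x, y, theta, x_m1, y_m1, x_m2, y_m2, ...],
        # i.e. the robot pose followed by two entries per landmark;
        # cov is the matching (3+2N)x(3+2N) matrix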

    def get_alpha_angle(self, l, r):
        # turning angle of this movement [rad]
        return (r - l)/ROBOT_WIDTH

    def get_radius_capitalR(self, l, alpha):
        # turning radius of the left wheel (only meaningful for alpha != 0)
        if alpha != 0:
            return l/alpha
        return 0

    def predict(self, u):
        # get l & r from the movement variable u
        [l, r] = u
        theta = self.mu[2]
        # control noise (assumed constant per-step stddevs, defined above;
        # np.var of a scalar would always be 0)
        cov_u = np.array([[SIGMA_L**2, 0],
                          [0, SIGMA_R**2]])
        alpha = self.get_alpha_angle(l, r)
        R = self.get_radius_capitalR(l, alpha)
        if l == r:
            # straight motion: compute G, A, B, C, D
            G = np.array([[1, 0, -l*np.sin(theta)],
                          [0, 1, l*np.cos(theta)],
                          [0, 0, 1]])
            A = (1/2)*(np.cos(theta) + (l/ROBOT_WIDTH)*np.sin(theta))
            B = (1/2)*(np.sin(theta) - (l/ROBOT_WIDTH)*np.cos(theta))
            C = (1/2)*(np.cos(theta) - (l/ROBOT_WIDTH)*np.sin(theta))
            D = (1/2)*(np.sin(theta) + (l/ROBOT_WIDTH)*np.cos(theta))
        else:
            # curved motion: compute G, A, B, C, D
            G = np.array([[1, 0, (R + ROBOT_WIDTH/2)*(np.cos(theta + alpha) - np.cos(theta))],
                          [0, 1, (R + ROBOT_WIDTH/2)*(np.sin(theta + alpha) - np.sin(theta))],
                          [0, 0, 1]])
            A = ((ROBOT_WIDTH*r)/np.square(r - l)) \
                * (np.sin(theta + alpha) - np.sin(theta)) \
                - ((r + l)/(2*(r - l)))*np.cos(theta + alpha)
            B = ((ROBOT_WIDTH*r)/np.square(r - l)) \
                * (-np.cos(theta + alpha) + np.cos(theta)) \
                - ((r + l)/(2*(r - l)))*np.sin(theta + alpha)
            C = -((ROBOT_WIDTH*l)/np.square(r - l)) \
                * (np.sin(theta + alpha) - np.sin(theta)) \
                + ((r + l)/(2*(r - l)))*np.cos(theta + alpha)
            D = -((ROBOT_WIDTH*l)/np.square(r - l)) \
                * (-np.cos(theta + alpha) + np.cos(theta)) \
                + ((r + l)/(2*(r - l)))*np.sin(theta + alpha)

        # compute V
        V = np.array([[A, C],
                      [B, D],
                      [-(1/ROBOT_WIDTH), (1/ROBOT_WIDTH)]])
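        # (V is the Jacobian of the pose with respect to the controls:
        # column one is d(x', y', theta')/dl, column two is d/dr)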

        # compute G_hat and V_hat as directed in the EKFSLAM pdf to be used going forward
        # (each landmark adds two state dimensions, so the padding is 2N wide)
        N = 2*self.total_landmarks
        G_hat = np.block([[G, np.zeros((3, N))],
                          [np.zeros((N, 3)), np.eye(N)]])

        V_hat = np.vstack([V, np.zeros((N, 2))])

        # according to Robot Motion Model of EKFSLAM pdf
        x_y = np.array([self.mu[0], self.mu[1]])
        if alpha == 0:
            # straight motion
            x_prime_y_prime = x_y + l*np.array([np.cos(theta), np.sin(theta)])
            theta_prime = theta
        else:
            # motion along a circular arc around the common turning center
            x_prime_y_prime = x_y + (R + ROBOT_WIDTH/2)*np.array(
                [np.sin(theta + alpha) - np.sin(theta),
                 -np.cos(theta + alpha) + np.cos(theta)])
            theta_prime = (theta + alpha) % (2*np.pi)

        # Predict new state
        self.mu = np.concatenate([x_prime_y_prime, [theta_prime], self.mu[3:]])

        # Predict new covariance
        self.cov = G_hat.dot(self.cov).dot(G_hat.T) + \
            (V_hat).dot(cov_u).dot(V_hat.T)

    def add_landmark(self, world_coords, id):
        # get world coordinates of the newly found landmark
        [x_i, y_i] = world_coords
        # update the mu
        self.mu = np.append(self.mu, [x_i, y_i])
        # initial landmark uncertainty (assumed constant, defined above)
        cov_i = np.array([[SIGMA_LANDMARK**2, 0],
                          [0, SIGMA_LANDMARK**2]])
        # update the covariance: grow it block-diagonally by the new landmark
        n = self.cov.shape[0]
        self.cov = np.block([[self.cov, np.zeros((n, 2))],
                             [np.zeros((2, n)), cov_i]])
        # set the landmark number in the map (0-based, so the landmark lives
        # at state indices 2*number+3 and 2*number+4)
        self.landmark_map[id] = self.total_landmarks
        # increase the count of total landmarks found
        self.total_landmarks += 1

    def correction(self, actual_measurement, id):
        # get the actual measured r and alpha
        [r_act, alpha_act] = actual_measurement
        # compute Q, the measurement noise (assumed constant stddevs, defined above)
        Q = np.array([[SIGMA_RANGE**2, 0], [0, SIGMA_BEARING**2]])
        # get landmark index number
        landmark_ind = self.landmark_map[id]

        # compute h(x, y, theta, x_m, y_m) = [[r], [alpha]]
        x_m_minus_x = self.mu[2*landmark_ind + 3] - self.mu[0]
        y_m_minus_y = self.mu[2*landmark_ind + 4] - self.mu[1]
        r = np.sqrt(np.square(x_m_minus_x) + np.square(y_m_minus_y))
        alpha = np.arctan2(y_m_minus_y, x_m_minus_x) - self.mu[2]
        h = np.array([[r], [alpha]])
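        # (np.arctan2 gives the landmark bearing in world coordinates;
        # subtracting the heading self.mu[2] makes it relative to the robot)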

        # partial derivatives of h with respect to x, y, theta, x_m, y_m
        denom = np.square(x_m_minus_x) + np.square(y_m_minus_y)
        delta_r_x = -x_m_minus_x/np.sqrt(denom)
        delta_r_y = -y_m_minus_y/np.sqrt(denom)
        delta_r_xm = x_m_minus_x/np.sqrt(denom)
        delta_r_ym = y_m_minus_y/np.sqrt(denom)
        delta_alpha_x = y_m_minus_y/denom
        delta_alpha_y = -x_m_minus_x/denom
        delta_alpha_xm = -y_m_minus_y/denom
        delta_alpha_ym = x_m_minus_x/denom

        # calculate Jacobian over the full state vector: only the pose
        # columns and this landmark's two columns are nonzero
        n = 3 + 2*self.total_landmarks
        H = np.zeros((2, n))
        H[:, :3] = [[delta_r_x, delta_r_y, 0],
                    [delta_alpha_x, delta_alpha_y, -1]]
        j = 3 + 2*landmark_ind
        H[:, j:j+2] = [[delta_r_xm, delta_r_ym],
                       [delta_alpha_xm, delta_alpha_ym]]

        # calculate Kalman gain
        K = self.cov.dot(H.T).dot(np.linalg.inv(H.dot(self.cov).dot(H.T)+Q))

        # calculate difference between expected and real observation;
        # only the bearing component is wrapped into [-pi, pi)
        m_dif = np.array([[r_act], [alpha_act]]) - h
        m_dif[1] = (m_dif[1] + np.pi) % (2*np.pi) - np.pi

        # update state vector and covariance matrix,
        # where n = 3 + 2N is the full state dimension
        self.mu = self.mu + K.dot(m_dif).ravel()
        self.cov = (np.eye(n) - K.dot(H)).dot(self.cov)

    def get_robot_pose(self):
        return self.mu[:3]

    def get_landmark_positions(self):
        return self.mu[3:]
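

# quick smoke test of the prediction step (assumed values; requires a real
# ROBOT_WIDTH and bypasses the unimplemented sensor stubs):
#
#   slam = EKF_SLAM()
#   slam.predict([0.1, 0.1])      # drive 0.1 m straight ahead
#   print(slam.get_robot_pose())  # -> approximately [0, 0.1, pi/2]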


slam = EKF_SLAM()

for timestep in range(TOTAL_TIME):

    # movements is what is referred to as u = (l, r) in the document
    movements = get_motor_movement()
    slam.predict(movements)
    landmark_observations = camera_detections(slam.get_robot_pose())
    for landmark_observation in landmark_observations:
        actual_measurement, world_coords, camera_coords, id = landmark_observation

        # actual_measurement is the distance r and angle alpha of the landmark
        # relative to the robot.
        # world_coords are the world coordinates of the landmark.
        # camera_coords are the camera coordinates of the landmark.
        # id is a unique number identifying the landmark.
        if id not in slam.landmark_map:
            slam.add_landmark(world_coords, id)

        # run the correction step for every observed landmark
        slam.correction(actual_measurement, id)

print("landmark estimated positions:", slam.get_landmark_positions())