diff --git a/selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py b/selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py index 9408132c5b8025..25b833ae8715bd 100755 --- a/selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py +++ b/selfdrive/controls/lib/longitudinal_mpc_lib/long_mpc.py @@ -2,6 +2,7 @@ import os import time import numpy as np +from enum import IntEnum from cereal import log from opendbc.car.interfaces import ACCEL_MIN, ACCEL_MAX from openpilot.common.realtime import DT_MDL @@ -22,8 +23,6 @@ EXPORT_DIR = os.path.join(LONG_MPC_DIR, "c_generated_code") JSON_FILE = os.path.join(LONG_MPC_DIR, "acados_ocp_long.json") -SOURCES = ['lead0', 'lead1', 'cruise', 'e2e'] - X_DIM = 3 U_DIM = 1 PARAM_DIM = 6 @@ -54,10 +53,17 @@ T_DIFFS = np.diff(T_IDXS, prepend=[0.]) COMFORT_BRAKE = 2.5 STOP_DISTANCE = 6.0 -CRUISE_MIN_ACCEL = -1.2 -CRUISE_MAX_ACCEL = 1.6 MIN_X_LEAD_FACTOR = 0.5 + +class Source(IntEnum): + CRUISE = 0 + LEAD0 = 1 + LEAD1 = 2 + LEAD2 = 3 + E2E = 4 + + def get_jerk_factor(personality=log.LongitudinalPersonality.standard): if personality==log.LongitudinalPersonality.relaxed: return 1.0 @@ -217,7 +223,7 @@ def __init__(self, dt=DT_MDL): self.dt = dt self.solver = AcadosOcpSolverCython(MODEL_NAME, ACADOS_SOLVER_TYPE, N) self.reset() - self.source = SOURCES[2] + self.lead_source = Source.LEAD0 def reset(self): self.solver.reset() @@ -306,15 +312,15 @@ def process_lead(self, lead): # MPC will not converge if immediate crash is expected # Clip lead distance to what is still possible to brake for min_x_lead = MIN_X_LEAD_FACTOR * (v_ego + v_lead) * (v_ego - v_lead) / (-ACCEL_MIN * 2) + min_x_lead = max(STOP_DISTANCE, min_x_lead) x_lead = np.clip(x_lead, min_x_lead, 1e8) v_lead = np.clip(v_lead, 0.0, 1e8) a_lead = np.clip(a_lead, -10., 5.) 
lead_xv = self.extrapolate_lead(x_lead, v_lead, a_lead, a_lead_tau) return lead_xv - def update(self, radarstate, v_cruise, personality=log.LongitudinalPersonality.standard): + def update(self, radarstate, personality=log.LongitudinalPersonality.standard): t_follow = get_T_FOLLOW(personality) - v_ego = self.x0[1] self.status = radarstate.leadOne.status or radarstate.leadTwo.status lead_xv_0 = self.process_lead(radarstate.leadOne) @@ -326,16 +332,9 @@ def update(self, radarstate, v_cruise, personality=log.LongitudinalPersonality.s lead_0_obstacle = lead_xv_0[:,0] + get_stopped_equivalence_factor(lead_xv_0[:,1]) lead_1_obstacle = lead_xv_1[:,0] + get_stopped_equivalence_factor(lead_xv_1[:,1]) - # Fake an obstacle for cruise, this ensures smooth acceleration to set speed - # when the leads are no factor. - v_lower = v_ego + (T_IDXS * CRUISE_MIN_ACCEL * 1.05) - # TODO does this make sense when max_a is negative? - v_upper = v_ego + (T_IDXS * CRUISE_MAX_ACCEL * 1.05) - v_cruise_clipped = np.clip(v_cruise * np.ones(N+1), v_lower, v_upper) - cruise_obstacle = np.cumsum(T_DIFFS * v_cruise_clipped) + get_safe_obstacle_distance(v_cruise_clipped, t_follow) - - x_obstacles = np.column_stack([lead_0_obstacle, lead_1_obstacle, cruise_obstacle]) - self.source = SOURCES[np.argmin(x_obstacles[0])] + x_obstacles = np.column_stack([lead_0_obstacle, lead_1_obstacle]) + lead_idx = np.argmin(x_obstacles[0]) + self.lead_source = Source.LEAD0 if lead_idx == 0 else Source.LEAD1 self.yref[:,:] = 0.0 for i in range(N): diff --git a/selfdrive/controls/lib/longitudinal_planner.py b/selfdrive/controls/lib/longitudinal_planner.py index ad84ecf24fa0bb..e64375f5f5fa11 100755 --- a/selfdrive/controls/lib/longitudinal_planner.py +++ b/selfdrive/controls/lib/longitudinal_planner.py @@ -9,22 +9,27 @@ from openpilot.common.realtime import DT_MDL from openpilot.selfdrive.modeld.constants import ModelConstants from openpilot.selfdrive.controls.lib.longcontrol import LongCtrlState -from 
openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc, SOURCES +from openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import LongitudinalMpc, Source from openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import T_IDXS as T_IDXS_MPC -from openpilot.selfdrive.controls.lib.drive_helpers import CONTROL_N, get_accel_from_plan +from openpilot.selfdrive.controls.lib.drive_helpers import CONTROL_N, get_accel_from_plan, smooth_value from openpilot.selfdrive.car.cruise import V_CRUISE_MAX, V_CRUISE_UNSET from openpilot.common.swaglog import cloudlog +CRUISE_MIN_ACCEL = -1.2 A_CRUISE_MAX_VALS = [1.6, 1.2, 0.8, 0.6] A_CRUISE_MAX_BP = [0., 10.0, 25., 40.] CONTROL_N_T_IDX = ModelConstants.T_IDXS[:CONTROL_N] ALLOW_THROTTLE_THRESHOLD = 0.4 MIN_ALLOW_THROTTLE_SPEED = 2.5 +K_CRUISE = 0.75 +TAU_CRUISE = 0.4 + # Lookup table for turns _A_TOTAL_MAX_V = [1.7, 3.2] _A_TOTAL_MAX_BP = [20., 40.] + def get_max_accel(v_ego): return np.interp(v_ego, A_CRUISE_MAX_BP, A_CRUISE_MAX_VALS) @@ -49,13 +54,13 @@ class LongitudinalPlanner: def __init__(self, CP, init_v=0.0, init_a=0.0, dt=DT_MDL): self.CP = CP self.mpc = LongitudinalMpc(dt=dt) + self.source = Source.CRUISE self.fcw = False self.dt = dt self.allow_throttle = True self.a_desired = init_a self.v_desired_filter = FirstOrderFilter(init_v, 2.0, self.dt) - self.prev_accel_clip = [ACCEL_MIN, ACCEL_MAX] self.output_a_target = 0.0 self.output_should_stop = False @@ -65,23 +70,11 @@ def __init__(self, CP, init_v=0.0, init_a=0.0, dt=DT_MDL): @staticmethod def parse_model(model_msg): - if (len(model_msg.position.x) == ModelConstants.IDX_N and - len(model_msg.velocity.x) == ModelConstants.IDX_N and - len(model_msg.acceleration.x) == ModelConstants.IDX_N): - x = np.interp(T_IDXS_MPC, ModelConstants.T_IDXS, model_msg.position.x) - v = np.interp(T_IDXS_MPC, ModelConstants.T_IDXS, model_msg.velocity.x) - a = np.interp(T_IDXS_MPC, ModelConstants.T_IDXS, model_msg.acceleration.x) - j = 
np.zeros(len(T_IDXS_MPC)) - else: - x = np.zeros(len(T_IDXS_MPC)) - v = np.zeros(len(T_IDXS_MPC)) - a = np.zeros(len(T_IDXS_MPC)) - j = np.zeros(len(T_IDXS_MPC)) if len(model_msg.meta.disengagePredictions.gasPressProbs) > 1: throttle_prob = model_msg.meta.disengagePredictions.gasPressProbs[1] else: throttle_prob = 1.0 - return x, v, a, j, throttle_prob + return throttle_prob def update(self, sm): if len(sm['carControl'].orientationNED) == 3: @@ -116,7 +109,7 @@ def update(self, sm): # Prevent divergence, smooth in current v_ego self.v_desired_filter.x = max(0.0, self.v_desired_filter.update(v_ego)) - _, _, _, _, throttle_prob = self.parse_model(sm['modelV2']) + throttle_prob = self.parse_model(sm['modelV2']) # Don't clip at low speeds since throttle_prob doesn't account for creep self.allow_throttle = throttle_prob > ALLOW_THROTTLE_THRESHOLD or v_ego <= MIN_ALLOW_THROTTLE_SPEED @@ -130,7 +123,7 @@ def update(self, sm): self.mpc.set_weights(prev_accel_constraint, personality=sm['selfdriveState'].personality) self.mpc.set_cur_state(self.v_desired_filter.x, self.a_desired) - self.mpc.update(sm['radarState'], v_cruise, personality=sm['selfdriveState'].personality) + self.mpc.update(sm['radarState'], personality=sm['selfdriveState'].personality) self.v_desired_trajectory = np.interp(CONTROL_N_T_IDX, T_IDXS_MPC, self.mpc.v_solution) self.a_desired_trajectory = np.interp(CONTROL_N_T_IDX, T_IDXS_MPC, self.mpc.a_solution) @@ -141,29 +134,31 @@ def update(self, sm): if self.fcw: cloudlog.info("FCW triggered") - # Interpolate 0.05 seconds and save as starting point for next iteration - a_prev = self.a_desired - self.a_desired = float(np.interp(self.dt, CONTROL_N_T_IDX, self.a_desired_trajectory)) - self.v_desired_filter.x = self.v_desired_filter.x + self.dt * (self.a_desired + a_prev) / 2.0 - + out_accels = {} action_t = self.CP.longitudinalActuatorDelay + DT_MDL - output_a_target_mpc, output_should_stop_mpc = get_accel_from_plan(self.v_desired_trajectory, 
self.a_desired_trajectory, CONTROL_N_T_IDX, - action_t=action_t, vEgoStopping=self.CP.vEgoStopping) - output_a_target_e2e = sm['modelV2'].action.desiredAcceleration - output_should_stop_e2e = sm['modelV2'].action.shouldStop - - if (output_a_target_e2e < output_a_target_mpc) and sm['selfdriveState'].experimentalMode: - output_a_target = output_a_target_e2e - self.output_should_stop = output_should_stop_e2e - self.mpc.source = SOURCES[3] - else: - output_a_target = output_a_target_mpc - self.output_should_stop = output_should_stop_mpc + out_accels[self.mpc.lead_source] = get_accel_from_plan( + self.v_desired_trajectory, self.a_desired_trajectory, CONTROL_N_T_IDX, action_t, self.CP.vEgoStopping) + if sm['selfdriveState'].experimentalMode: + out_accels[Source.E2E] = (sm['modelV2'].action.desiredAcceleration, sm['modelV2'].action.shouldStop) + + cruise_accel = K_CRUISE * (v_cruise - v_ego) + cruise_accel = np.clip(cruise_accel, CRUISE_MIN_ACCEL, accel_clip[1]) + cruise_accel = smooth_value(cruise_accel, self.output_a_target, TAU_CRUISE) + out_accels[Source.CRUISE] = (cruise_accel, False) + + source, (output_a_target, _) = min(out_accels.items(), key=lambda x: x[1][0]) + self.source = source + self.output_should_stop = any(should_stop for _, should_stop in out_accels.values()) - for idx in range(2): - accel_clip[idx] = np.clip(accel_clip[idx], self.prev_accel_clip[idx] - 0.05, self.prev_accel_clip[idx] + 0.05) self.output_a_target = np.clip(output_a_target, accel_clip[0], accel_clip[1]) - self.prev_accel_clip = accel_clip + + # Interpolate 0.05 seconds and save as starting point for next iteration + a_prev = self.a_desired + if self.source == self.mpc.lead_source: + self.a_desired = float(np.interp(self.dt, CONTROL_N_T_IDX, self.a_desired_trajectory)) + else: + self.a_desired = self.output_a_target + self.v_desired_filter.x = self.v_desired_filter.x + self.dt * (self.a_desired + a_prev) / 2.0 def publish(self, sm, pm): plan_send = 
messaging.new_message('longitudinalPlan') @@ -180,7 +175,7 @@ def publish(self, sm, pm): longitudinalPlan.jerks = self.j_desired_trajectory.tolist() longitudinalPlan.hasLead = sm['radarState'].leadOne.status - longitudinalPlan.longitudinalPlanSource = self.mpc.source + longitudinalPlan.longitudinalPlanSource = self.source longitudinalPlan.fcw = self.fcw longitudinalPlan.aTarget = float(self.output_a_target) diff --git a/selfdrive/test/longitudinal_maneuvers/maneuver.py b/selfdrive/test/longitudinal_maneuvers/maneuver.py index dfd5b3e109b726..ba0379f2d725bf 100644 --- a/selfdrive/test/longitudinal_maneuvers/maneuver.py +++ b/selfdrive/test/longitudinal_maneuvers/maneuver.py @@ -60,7 +60,8 @@ def evaluate(self): log['distance_lead'], log['speed'], speed_lead, - log['acceleration']])) + log['acceleration'], + log['d_rel']])) if d_rel < .4 and (self.only_radar or prob_lead > 0.5): print("Crashed!!!!") diff --git a/tools/longitudinal_maneuvers/mpc_longitudinal_tuning_report.py b/tools/longitudinal_maneuvers/mpc_longitudinal_tuning_report.py index 583c6240e525a7..8c1a60f5b76f18 100644 --- a/tools/longitudinal_maneuvers/mpc_longitudinal_tuning_report.py +++ b/tools/longitudinal_maneuvers/mpc_longitudinal_tuning_report.py @@ -7,16 +7,18 @@ from openpilot.selfdrive.controls.tests.test_following_distance import desired_follow_distance TIME = 0 +LEAD_DISTANCE = 2 EGO_V = 3 EGO_A = 5 -LEAD_DISTANCE= 2 +D_REL = 6 axis_labels = ['Time (s)', 'Ego position (m)', - 'Lead distance (m)', + 'Lead absolute position (m)', 'Ego Velocity (m/s)', 'Lead Velocity (m/s)', 'Ego acceleration (m/s^2)', + 'Lead distance (m)', ] @@ -81,7 +83,7 @@ def get_html_from_results(results, labels, AXIS): htmls.append(markdown.markdown('# ' + name)) htmls.append(get_html_from_results(results, labels, EGO_A)) -htmls.append(get_html_from_results(results, labels, LEAD_DISTANCE)) +htmls.append(get_html_from_results(results, labels, D_REL)) results = {} @@ -102,6 +104,7 @@ def get_html_from_results(results,
labels, AXIS): labels.append(f'{oscil} m/s oscilliation size') htmls.append(markdown.markdown('# ' + name)) +htmls.append(get_html_from_results(results, labels, D_REL)) htmls.append(get_html_from_results(results, labels, EGO_V)) htmls.append(get_html_from_results(results, labels, EGO_A)) @@ -126,7 +129,7 @@ def get_html_from_results(results, labels, AXIS): htmls.append(markdown.markdown('# ' + name)) htmls.append(get_html_from_results(results, labels, EGO_V)) -htmls.append(get_html_from_results(results, labels, LEAD_DISTANCE)) +htmls.append(get_html_from_results(results, labels, D_REL)) results = {} @@ -148,7 +151,7 @@ def get_html_from_results(results, labels, AXIS): htmls.append(markdown.markdown('# ' + name)) htmls.append(get_html_from_results(results, labels, EGO_V)) -htmls.append(get_html_from_results(results, labels, LEAD_DISTANCE)) +htmls.append(get_html_from_results(results, labels, D_REL)) results = {} @@ -170,7 +173,7 @@ def get_html_from_results(results, labels, AXIS): htmls.append(markdown.markdown('# ' + name)) htmls.append(get_html_from_results(results, labels, EGO_A)) -htmls.append(get_html_from_results(results, labels, LEAD_DISTANCE)) +htmls.append(get_html_from_results(results, labels, D_REL)) results = {} @@ -193,7 +196,7 @@ def get_html_from_results(results, labels, AXIS): htmls.append(markdown.markdown('# ' + name)) htmls.append(get_html_from_results(results, labels, EGO_A)) -htmls.append(get_html_from_results(results, labels, LEAD_DISTANCE)) +htmls.append(get_html_from_results(results, labels, D_REL)) results = {} diff --git a/tools/replay/ui.py b/tools/replay/ui.py index 54667ebfe967f6..2618f62a8ca773 100755 --- a/tools/replay/ui.py +++ b/tools/replay/ui.py @@ -239,7 +239,7 @@ def ui_thread(addr): ("ENABLED", GREEN if sm['selfdriveState'].enabled else BLACK), ("SPEED: " + str(round(sm['carState'].vEgo, 1)) + " m/s", YELLOW), ("LONG CONTROL STATE: " + str(sm['controlsState'].longControlState), YELLOW), - ("LONG MPC SOURCE: " + 
str(sm['longitudinalPlan'].longitudinalPlanSource), YELLOW), + ("LONG MPC SOURCE: " + str(sm['longitudinalPlan'].longitudinalPlanSource.name), YELLOW), None, ("ANGLE OFFSET (AVG): " + str(round(sm['liveParameters'].angleOffsetAverageDeg, 2)) + " deg", YELLOW), ("ANGLE OFFSET (INSTANT): " + str(round(sm['liveParameters'].angleOffsetDeg, 2)) + " deg", YELLOW),