From c66b24e078d42a9145ce1f56607c42f5e8f27cd8 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Mon, 14 Nov 2022 11:48:53 -0500 Subject: [PATCH 01/25] Experimental plotting update --- main/src/vtr_lidar/CMakeLists.txt | 28 ++++++++++++++++++ .../modules/odometry/odometry_icp_module.cpp | 8 ++--- main/src/vtr_navigation/CMakeLists.txt | 28 ++++++++++++++++++ main/src/vtr_path_planning/CMakeLists.txt | 29 +++++++++++++++++++ .../cbit/cbit_path_planner.hpp | 2 ++ .../modules/odometry/odometry_icp_module.cpp | 8 ++--- 6 files changed, 95 insertions(+), 8 deletions(-) diff --git a/main/src/vtr_lidar/CMakeLists.txt b/main/src/vtr_lidar/CMakeLists.txt index 806239eeb..befb57ad1 100644 --- a/main/src/vtr_lidar/CMakeLists.txt +++ b/main/src/vtr_lidar/CMakeLists.txt @@ -71,6 +71,34 @@ ament_target_dependencies(${PROJECT_NAME}_path_planning lgmath steam ) +# START OF JORDY EXPERIMENTAL MATPLOTLIB CHANGES +# find python libraries +find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) +find_package(PythonLibs 3.0 REQUIRED) +include_directories(${PYTHON3_INCLUDE_DIRS} ${NumPy_INCLUDE_DIRS}) + +# populate matplotlib repository +include(FetchContent) +FetchContent_Declare( + matplotlib + GIT_REPOSITORY https://github.com/lava/matplotlib-cpp.git + GIT_TAG f23347fca25219d1c42cbb91608b5556814bf572 +) +FetchContent_GetProperties(matplotlib) +if(NOT matplotlib_POPULATED) + FetchContent_Populate(matplotlib) +endif() +include_directories(SYSTEM ${matplotlib_SOURCE_DIR}) + +target_link_libraries(${PROJECT_NAME}_path_planning ${PYTHON_LIBRARIES}) + + +find_package(PythonLibs REQUIRED) +include_directories(${PYTHON_INCLUDE_DIRS}) +target_link_libraries(${PROJECT_NAME}_path_planning ${PYTHON_LIBRARIES}) +# END OF JORDY EXPERIMENTAL MATPLOTLIBCPP Changes FOR PLOTTING + + # additional tools for experiments file(GLOB_RECURSE TOOLS_SRC src/mesh2pcd/*.cpp diff --git a/main/src/vtr_lidar/src/modules/odometry/odometry_icp_module.cpp 
b/main/src/vtr_lidar/src/modules/odometry/odometry_icp_module.cpp index b5ddd76bd..ac7620c92 100644 --- a/main/src/vtr_lidar/src/modules/odometry/odometry_icp_module.cpp +++ b/main/src/vtr_lidar/src/modules/odometry/odometry_icp_module.cpp @@ -502,10 +502,10 @@ void OdometryICPModule::run_(QueryCache &qdata0, OutputCache &, } *qdata.T_r_m_odo = T_r_m_eval->value(); *qdata.timestamp_odo = query_stamp; -#if 1 - CLOG(WARNING, "lidar.odometry_icp") << "T_m_r is: " << qdata.T_r_m_odo->inverse().vec().transpose(); - CLOG(WARNING, "lidar.odometry_icp") << "w_m_r_in_r is: " << qdata.w_m_r_in_r_odo->transpose(); -#endif +//#if 1 +// CLOG(WARNING, "lidar.odometry_icp") << "T_m_r is: " << qdata.T_r_m_odo->inverse().vec().transpose(); +// CLOG(WARNING, "lidar.odometry_icp") << "w_m_r_in_r is: " << qdata.w_m_r_in_r_odo->transpose(); +//#endif // /// \todo double check validity when no vertex has been created *qdata.T_r_v_odo = T_r_m_icp * sliding_map_odo.T_vertex_this().inverse(); diff --git a/main/src/vtr_navigation/CMakeLists.txt b/main/src/vtr_navigation/CMakeLists.txt index 60821d024..adf45363d 100644 --- a/main/src/vtr_navigation/CMakeLists.txt +++ b/main/src/vtr_navigation/CMakeLists.txt @@ -103,6 +103,34 @@ target_include_directories(${PROJECT_NAME} $ $) + +# START OF JORDY EXPERIMENTAL MATPLOTLIB CHANGES +# find python libraries +find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) +find_package(PythonLibs 3.0 REQUIRED) +include_directories(${PYTHON3_INCLUDE_DIRS} ${NumPy_INCLUDE_DIRS}) + +# populate matplotlib repository +include(FetchContent) +FetchContent_Declare( + matplotlib + GIT_REPOSITORY https://github.com/lava/matplotlib-cpp.git + GIT_TAG f23347fca25219d1c42cbb91608b5556814bf572 +) +FetchContent_GetProperties(matplotlib) +if(NOT matplotlib_POPULATED) + FetchContent_Populate(matplotlib) +endif() +include_directories(SYSTEM ${matplotlib_SOURCE_DIR}) + +target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES}) + + +find_package(PythonLibs 
REQUIRED) +include_directories(${PYTHON_INCLUDE_DIRS}) +target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES}) +# END OF JORDY EXPERIMENTAL MATPLOTLIBCPP Changes FOR PLOTTING + install( DIRECTORY include/ DESTINATION include diff --git a/main/src/vtr_path_planning/CMakeLists.txt b/main/src/vtr_path_planning/CMakeLists.txt index dbd724624..88cac57e9 100644 --- a/main/src/vtr_path_planning/CMakeLists.txt +++ b/main/src/vtr_path_planning/CMakeLists.txt @@ -70,6 +70,35 @@ file(GLOB_RECURSE SRC src/cbit/utils.cpp) file(GLOB_RECURSE SRC src/cbit/cbit_path_planner.cpp) +# START OF JORDY EXPERIMENTAL MATPLOTLIB CHANGES (note also had to make similar changes in vtr_lidar and vtr_navigation cmakelist.txt also for some dumb reason) +# find python libraries +find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) +find_package(PythonLibs 3.0 REQUIRED) +include_directories(${PYTHON3_INCLUDE_DIRS} ${NumPy_INCLUDE_DIRS}) + +# populate matplotlib repository +include(FetchContent) +FetchContent_Declare( + matplotlib + GIT_REPOSITORY https://github.com/lava/matplotlib-cpp.git + GIT_TAG f23347fca25219d1c42cbb91608b5556814bf572 +) +FetchContent_GetProperties(matplotlib) +if(NOT matplotlib_POPULATED) + FetchContent_Populate(matplotlib) +endif() +include_directories(SYSTEM ${matplotlib_SOURCE_DIR}) + +target_link_libraries(${PROJECT_NAME}_cbit ${PYTHON_LIBRARIES}) + + +find_package(PythonLibs REQUIRED) +include_directories(${PYTHON_INCLUDE_DIRS}) +target_link_libraries(${PROJECT_NAME}_cbit ${PYTHON_LIBRARIES}) +# EXPERIMENTAL, JORDY ADDITION FOR MATPLOTLIBCPP PLOTTING +#target_link_libraries($vtr_path_planning ${PYTHON_LIBRARIES} Python3::NumPy) +# END OF EXPERIMENTAL MATPLOTLIBCPP + ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET) ament_export_dependencies( rclcpp tf2 tf2_ros tf2_eigen geometry_msgs diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp 
b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index 7ef469661..8cfd12cae 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -27,6 +27,7 @@ #include #include // For benchmarking #include // For debug sleeps +#include "matplotlibcpp.h" // experimental plotting #include "vtr_logging/logging.hpp" #include "vtr_path_planning/base_path_planner.hpp" @@ -34,6 +35,7 @@ #include "vtr_path_planning/cbit/generate_pq.hpp" #include "vtr_path_planning/cbit/cbit_config.hpp" #include "vtr_path_planning/cbit/cbit_costmap.hpp" +//#include "vtr_path_planning/cbit/matplotlibcpp.h" // experimental plotting util #include "vtr_tactic/tactic.hpp" #pragma once diff --git a/main/src/vtr_radar/src/modules/odometry/odometry_icp_module.cpp b/main/src/vtr_radar/src/modules/odometry/odometry_icp_module.cpp index 918531993..1dc238cc0 100644 --- a/main/src/vtr_radar/src/modules/odometry/odometry_icp_module.cpp +++ b/main/src/vtr_radar/src/modules/odometry/odometry_icp_module.cpp @@ -549,10 +549,10 @@ void OdometryICPModule::run_(QueryCache &qdata0, OutputCache &, } *qdata.T_r_m_odo = T_r_m_eval->value(); *qdata.timestamp_odo = query_stamp; -#if 1 - CLOG(WARNING, "radar.odometry_icp") << "T_m_r is: " << qdata.T_r_m_odo->inverse().vec().transpose(); - CLOG(WARNING, "radar.odometry_icp") << "w_m_r_in_r is: " << qdata.w_m_r_in_r_odo->transpose(); -#endif +//#if 1 +// CLOG(WARNING, "radar.odometry_icp") << "T_m_r is: " << qdata.T_r_m_odo->inverse().vec().transpose(); +// CLOG(WARNING, "radar.odometry_icp") << "w_m_r_in_r is: " << qdata.w_m_r_in_r_odo->transpose(); +//#endif // /// \todo double check validity when no vertex has been created *qdata.T_r_v_odo = T_r_m_icp * sliding_map_odo.T_vertex_this().inverse(); From 47319f86d2acfc2cf27369eadd93f281aadfc599 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Tue, 15 Nov 2022 11:24:24 -0500 
Subject: [PATCH 02/25] Added plotting debug support to the planner --- config/honeycomb_grizzly_default.yaml | 2 +- main/src/deps/steam | 2 +- main/src/vtr_path_planning/CMakeLists.txt | 3 + .../cbit/cbit_path_planner.hpp | 3 +- .../vtr_path_planning/cbit/cbit_plotting.hpp | 32 +++++ .../src/cbit/cbit_path_planner.cpp | 8 ++ .../src/cbit/cbit_plotting.cpp | 117 ++++++++++++++++++ 7 files changed, 163 insertions(+), 4 deletions(-) create mode 100644 main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_plotting.hpp create mode 100644 main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 62f6d8a36..641beb4d5 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -24,7 +24,7 @@ # "mpc.cbit", # "mpc_debug.cbit", # mission planner - #"mission.server", + "mission.server", #"mission.state_machine", # pose graph # "pose_graph", diff --git a/main/src/deps/steam b/main/src/deps/steam index 24fca211d..53060505f 160000 --- a/main/src/deps/steam +++ b/main/src/deps/steam @@ -1 +1 @@ -Subproject commit 24fca211dc322fd41663c6a9cd5c350a19921de2 +Subproject commit 53060505f4ac84315ee28829f664895d1da56ff9 diff --git a/main/src/vtr_path_planning/CMakeLists.txt b/main/src/vtr_path_planning/CMakeLists.txt index 88cac57e9..ba3c80e0a 100644 --- a/main/src/vtr_path_planning/CMakeLists.txt +++ b/main/src/vtr_path_planning/CMakeLists.txt @@ -71,6 +71,9 @@ file(GLOB_RECURSE SRC src/cbit/utils.cpp) file(GLOB_RECURSE SRC src/cbit/cbit_path_planner.cpp) # START OF JORDY EXPERIMENTAL MATPLOTLIB CHANGES (note also had to make similar changes in vtr_lidar and vtr_navigation cmakelist.txt also for some dumb reason) +# Declare the plotting tools +file(GLOB_RECURSE SRC src/cbit/cbit_plotting.cpp) + # find python libraries find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) find_package(PythonLibs 3.0 REQUIRED) diff --git 
a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index 8cfd12cae..5862710d0 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -27,7 +27,6 @@ #include #include // For benchmarking #include // For debug sleeps -#include "matplotlibcpp.h" // experimental plotting #include "vtr_logging/logging.hpp" #include "vtr_path_planning/base_path_planner.hpp" @@ -35,7 +34,7 @@ #include "vtr_path_planning/cbit/generate_pq.hpp" #include "vtr_path_planning/cbit/cbit_config.hpp" #include "vtr_path_planning/cbit/cbit_costmap.hpp" -//#include "vtr_path_planning/cbit/matplotlibcpp.h" // experimental plotting util +#include "vtr_path_planning/cbit/cbit_plotting.hpp" #include "vtr_tactic/tactic.hpp" #pragma once diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_plotting.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_plotting.hpp new file mode 100644 index 000000000..83974a017 --- /dev/null +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_plotting.hpp @@ -0,0 +1,32 @@ +// Copyright 2021, Autonomous Space Robotics Lab (ASRL) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * \file cbit_plotting.hpp + * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) + */ + +// Contains Matplotlibcpp debug plotting tools for the cbit planner +// This library is not meant to be included in the main vt&r branch, debug only + +#include +#include "matplotlibcpp.h" // experimental plotting +#include "vtr_path_planning/cbit/utils.hpp" + +#pragma once + +void plot_tree(Tree current_tree, Node robot_pq, std::vector path_p, std::vector path_q, std::vector> samples); +void plot_robot(Node robot_pq); + +void initialize_plot(); \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index 5387282d5..a0a2a8342 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -73,6 +73,9 @@ CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, p_start = std::make_shared (global_path->p.back(), 0.0); dynamic_window_width = conf.sliding_window_width; + + // DEBUG PLOTTING + initialize_plot(); InitializePlanningSpace(); Planning(robot_state, costmap_ptr); @@ -335,6 +338,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Perform a state update to convert the actual robot position to its corresponding pq space: p_goal = UpdateState(); + // EXPERIMENTAL TODO: If robot pose nears the goal (p_start), exit the planner with the current solution //CLOG(ERROR, "path_planning") << "Trying the distance to goal is: " << abs(p_goal->p - p_start->p); //if (abs(p_goal->p - p_start->p) <= 2.0) @@ -437,6 +441,10 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Reset the start time start_time = std::chrono::high_resolution_clock::now(); //std::cout << "Made it to end of new batch process code" << std::endl; + + //DEBUG PLOTTING + plot_tree(tree, *p_goal, path_x, path_y, samples); + } diff --git 
a/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp b/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp new file mode 100644 index 000000000..255163954 --- /dev/null +++ b/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp @@ -0,0 +1,117 @@ +#include "vtr_path_planning/cbit/cbit_plotting.hpp" + + +void initialize_plot() +{ + // Testing plots + matplotlibcpp::figure_size(700, 1400); // Larger more useful size for debugging + //matplotlibcpp::figure_size(350, 700); // For mini more practical version for everyday use + matplotlibcpp::ylim(-2, 7); + matplotlibcpp::xlim(-3, 3); + matplotlibcpp::title("Curvilinear Batch Informed Trees (C++)"); + matplotlibcpp::xlabel("q [m]"); + matplotlibcpp::ylabel("p [m]"); + + // TODO: Configure Title, Legend + + //matplotlibcpp::axis("equal"); // Didnt like the axis equal behaviour + matplotlibcpp::draw(); + matplotlibcpp::pause(0.001); + return; +} + +// Function for plotting all the edges in the current tree (Called at the conclusion of each batch) +// Note this will also get triggered following state updates and repairs as well +void plot_tree(Tree tree, Node robot_pq, std::vector path_p, std::vector path_q, std::vector> samples) +{ + // Clear the figure + matplotlibcpp::clf(); + + // Configure plotting keyword settings + std::unordered_map keywords_robot; + keywords_robot["color"] = "lime"; + + + // Set sliding window based on current robot state + matplotlibcpp::ylim((robot_pq.p - 1), (robot_pq.p + 11)); + matplotlibcpp::xlim(-3,3); // TODO: replace with param + + + // Iterate through the current tree, plot each edge + for (int i = 0; i < tree.E.size(); i++) + { + std::shared_ptr vm = std::get<0>(tree.E[i]); + std::shared_ptr xm = std::get<1>(tree.E[i]); + std::vector plot_p_edge = {vm->p,xm->p}; + std::vector plot_q_edge = {-1*vm->q,-1*xm->q}; + matplotlibcpp::plot(plot_q_edge, plot_p_edge, "c"); + } + // Need one stand in point to populate the legend + std::vector dummy_edge = {0}; + 
matplotlibcpp::named_plot("Edges", dummy_edge, dummy_edge, "c"); + + + // Plot the current path solution + // Invert the q values + for (int j= 0; j < path_q.size(); j++) + { + path_q[j] = path_q[j] * -1; + } + matplotlibcpp::named_plot("BIT* Path", path_q, path_p, "b"); + + + // Plot a hashed centre line for the taught path + std::vector taught_p = {0,(*tree.V[0]).p}; + std::vector taught_q = {0,0}; + matplotlibcpp::named_plot("Taught Path", taught_q, taught_p, "r--"); + + + // Plot samples (optional, but seems like compute is okay so why not) + // I find its actually abit distracting having the samples and it doesnt really help very much so commenting out for now + /* + for (int k= 0; k < samples.size(); k++) + { + Node sample = *(samples[k]); + std::vector plot_p_sample= {sample.p,sample.p}; + std::vector plot_q_sample = {-1*sample.q,-1*sample.q}; + matplotlibcpp::plot(plot_q_sample, plot_p_sample, ".k"); + } + */ + + // Plot the current robot state + matplotlibcpp::named_plot("Robot State", {-1*robot_pq.q}, std::vector {robot_pq.p}, ".g"); + matplotlibcpp::scatter(std::vector {-1*robot_pq.q}, std::vector {robot_pq.p}, 200.0, keywords_robot); // try to change colour, need to implement + + + // Format Axis/title + matplotlibcpp::title("Batch Informed Trees"); + matplotlibcpp::xlabel("q [m]"); + matplotlibcpp::ylabel("p [m]"); + matplotlibcpp::legend(); + + + // Display and show while continuing script (adds 1ms delay to draw) + matplotlibcpp::draw(); + matplotlibcpp::pause(0.001); + return; +} + +// TODO: Experiment with a Euclidean version that I can display everything in. See whether I can discretize and convert all edges +// of the tree to euclidean space. + +// TODO: Test other plotting functions which update more regularly in places that are more relevant (like robot state updates, published path) +// I think you can do this, but the problem is clearing the figure, if the plots are asychronous this will be an issue. 
+ +void plot_robot(Node robot_pq) +{ + // Configure plotting keyword settings + std::unordered_map keywords_robot; + keywords_robot["color"] = "lime"; + + // Plot the current robot state + matplotlibcpp::scatter(std::vector {-1*robot_pq.q}, std::vector {robot_pq.p}, 100.0, keywords_robot); // try to change colour, need to implement + + matplotlibcpp::draw(); + matplotlibcpp::pause(0.001); + return; +} \ No newline at end of file From d7ba50d923e0a6850a94a3a81ee32594fdb27ca2 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Sun, 27 Nov 2022 16:48:40 -0500 Subject: [PATCH 03/25] First cleanup commit, updated some functions to be more robust and improve performance --- config/honeycomb_grizzly_default.yaml | 4 +- .../cbit/cbit_path_planner.hpp | 2 + .../src/cbit/cbit_path_planner.cpp | 225 +++++++++++------- main/src/vtr_path_planning/src/cbit/utils.cpp | 4 +- 4 files changed, 147 insertions(+), 88 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 641beb4d5..ccfdffe1b 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -441,13 +441,13 @@ # Planner Tuning Params initial_samples: 250 - batch_samples: 100 + batch_samples: 150 pre_seed_resolution: 0.5 alpha: 0.5 q_max: 2.5 frame_interval: 50 iter_max: 10000000 - eta: 1.1 + eta: 1.0 rad_m_exhange: 1.00 initial_exp_rad: 0.75 extrapolation: false diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index 5862710d0..5cdcac0ff 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -109,7 +109,9 @@ class CBITPlanner { std::vector ExtractEuclidPath(); void Prune(double c_best, double c_best_weighted); bool edge_in_tree(Node v, Node x); + bool edge_in_tree_v2(std::shared_ptr v, 
std::shared_ptr x); bool node_in_tree(Node x); + bool node_in_tree_v2(std::shared_ptr x); double cost_col(std::vector> obs, Node start, Node end); double weighted_cost_col(std::vector> obs, Node start, Node end); Node curve_to_euclid(Node node); diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index a0a2a8342..6b5be07ce 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -132,12 +132,12 @@ void CBITPlanner::InitializePlanningSpace() // Initialize obstacles // Permanent simulated obstacles can be populated here like so, but in the latest version obstacles are updated from the costmap //obs_rectangle = {{2.0, -2.0, 1, 3}, {3.0, 4.0, 3.0, 1.0}, {3.0, 8.0, 2.0, 2.0}}; // Obstacle format is {{x,y,w,h}} where x,y is the lower left corner coordinate - //obs_rectangle = {{2.0, -2.0, 1, 3}, {3.0, 4.0, 3.0, 1.0}}; + //obs_rectangle = {{4.0, -1.0, 2, 2}}; // for plotting debugger tests obs_rectangle = {}; // Initialize sliding window dimensions for plotting and radius expansion calc; sample_box_height = conf.q_max * 2.0; - sample_box_width = conf.sliding_window_width; + sample_box_width = conf.sliding_window_width + 2 * conf.sliding_window_freespace_padding; } @@ -277,7 +277,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo if ((p_goal->parent != nullptr) && (std::chrono::high_resolution_clock::now() >= state_update_time)) { // Update timers - CLOG(INFO, "path_planning.cbit_planner") << "Attempting to Update Robot State"; + //CLOG(INFO, "path_planning.cbit_planner") << "Attempting to Update Robot State"; state_update_time = std::chrono::high_resolution_clock::now() + std::chrono::milliseconds(control_period_ms); // get the euclidean robot state in the world frame from vt&r @@ -295,7 +295,8 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo 
const auto chain_info = getChainInfo(robot_state); auto [stamp, w_p_r_in_r, T_p_r, T_w_p, curr_sid] = chain_info; - // Experimental Pose extrapolation (seems to work well) + // Experimental Pose extrapolation (I find often this doesnt work well with te direct path tracking method) + // It can be desireable to delay pruning the path behind where we think the robot is if (conf.extrapolation == true) { @@ -313,16 +314,16 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo }(); robot_pose = T2xyzrpy(T_w_p * T_p_r_extp); - CLOG(INFO, "path_planning.cbit_planner") << "Extrapolated Robot Pose: x: " << std::get<0>(robot_pose) << " y: " - << std::get<1>(robot_pose) << " z: " << std::get<2>(robot_pose) << " roll: " << std::get<3>(robot_pose) << " pitch: " - << std::get<4>(robot_pose) << " yaw: " << std::get<5>(robot_pose); + //CLOG(INFO, "path_planning.cbit_planner") << "Extrapolated Robot Pose: x: " << std::get<0>(robot_pose) << " y: " + //<< std::get<1>(robot_pose) << " z: " << std::get<2>(robot_pose) << " roll: " << std::get<3>(robot_pose) << " pitch: " + //<< std::get<4>(robot_pose) << " yaw: " << std::get<5>(robot_pose); } else { robot_pose= T2xyzrpy(T_w_p * T_p_r); - CLOG(INFO, "path_planning.cbit_planner") << "Robot Pose: x: " << std::get<0>(robot_pose) << " y: " - << std::get<1>(robot_pose) << " z: " << std::get<2>(robot_pose) << " roll: " << std::get<3>(robot_pose) << " pitch: " - << std::get<4>(robot_pose) << " yaw: " << std::get<5>(robot_pose); + //CLOG(INFO, "path_planning.cbit_planner") << "Robot Pose: x: " << std::get<0>(robot_pose) << " y: " + //<< std::get<1>(robot_pose) << " z: " << std::get<2>(robot_pose) << " roll: " << std::get<3>(robot_pose) << " pitch: " + //<< std::get<4>(robot_pose) << " yaw: " << std::get<5>(robot_pose); } @@ -367,7 +368,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Debug, check to see what p value we are spitting out - CLOG(INFO, "path_planning.cbit_planner") 
<< "The p,q coordinate of the robots updated state goal is now: p: " << p_goal->p << " q: " << p_goal->q; + //CLOG(INFO, "path_planning.cbit_planner") << "The p,q coordinate of the robots updated state goal is now: p: " << p_goal->p << " q: " << p_goal->q; // Add the new goal (robot state) to the samples so it can be found again @@ -378,14 +379,14 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo tree.QE.clear(); for (int i = 0; i < tree.V.size(); i++) { - if (calc_dist(*(tree.V[i]), *p_goal) <= 1.0 ) // TODO: replace magic number with a param, represents radius to search for state update rewires + if (calc_dist(*(tree.V[i]), *p_goal) <= 2.0 ) // TODO: replace magic number with a param, represents radius to search for state update rewires { tree.QV.push_back(tree.V[i]); } } - CLOG(INFO, "path_planning.cbit_planner") << "Robot State Updated Successfully"; - CLOG(INFO, "path_planning.cbit_planner") << "QV size: " << tree.QV.size(); + CLOG(DEBUG, "path_planning.cbit_planner") << "Robot State Updated Successfully"; + //CLOG(INFO, "path_planning.cbit_planner") << "QV size: " << tree.QV.size(); // When the planner resumes, this will cause it to immediately try to rewire locally to the new robot state in a short amount of time @@ -424,7 +425,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Debug message for online use (wont use std::out long term) CLOG(INFO, "path_planning.cbit_planner") << "New Batch - Iteration: " << k << " Path Cost: " << p_goal->g_T_weighted << " Batch Compute Time (ms): " << duration.count(); - CLOG(INFO, "path_planning.cbit_planner") << "Tree Size: " << tree.V.size() << " Vertex Queue Size: " << tree.QV.size() << " Sample Size: " < (tree.V[i])); } + */ // Initialize the Vertex Queue; @@ -531,12 +534,13 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo } // TODO: Dynamic Expansion radius selection: - /* + // Its debatable whether this works 
well or not, but going to try it + if (p_goal->g_T != INFINITY) { conf.initial_exp_rad = exp_radius((tree.V.size() + samples.size()), sample_box_height, sample_box_width, conf.eta); } - */ + } @@ -559,7 +563,9 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // TODO: Handle edge case where a perfect path results in empty queues (see python code) if (vm == NULL) { - break; + // If we just continue here, we enter a state where after every single iteration we will complete a batch, but I think this is fine + // It will just stay this way until we move off the path and have a non perfect path solution + continue; } // Remove the edge from the queue (I think best to do this inside BestInEdgeQueue function) @@ -579,7 +585,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo if (vm->g_T_weighted + weighted_cost < xm->g_T_weighted) { // Check if xm is in the tree, if it is, we need to do some rewiring of any other edge which has xm as an endpoint - if (node_in_tree(*xm) == true) + if (node_in_tree_v2(xm) == true) { // TODO: Needs some additional wormhole logic in here which I havent done yet @@ -587,7 +593,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo int upper_edge_index = tree.E.size(); for (int i = 0; i < upper_edge_index; i++) { - if (std::get<1>(tree.E[i])->p == xm->p && std::get<1>(tree.E[i])->q == xm->q) + if (std::get<1>(tree.E[i]) == xm) // changed to equating pointer addresses { // Once we process the vertex and have it ready to return, remove it from the edge queue (This is a fast way of doing so) @@ -609,7 +615,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Remove it from the samples, add it to the vertex tree and vertex queue: for (int i = 0; i < samples.size(); i++) { - if (samples[i]->p == xm->p && samples[i]->q == xm->q) + if (samples[i] == xm) { auto it = samples.begin() + i; *it = std::move(samples.back()); 
@@ -625,23 +631,21 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Set cost to comes xm->g_T_weighted = vm->g_T_weighted + weighted_cost; xm->g_T = vm->g_T + actual_cost; - + xm->parent = vm; // TODO: wormhole handling // Generate edge, create parent chain tree.E.push_back(std::tuple, std::shared_ptr> {vm, xm}); - - - xm->parent = vm; + // Filter edge queue as now any other edge with worse cost heuristic can be ignored int upper_queue_index = tree.QE.size(); for (int i = 0; i < upper_queue_index; i++) { Node v = *std::get<0>(tree.QE[i]); - Node x = *std::get<1>(tree.QE[i]); + std::shared_ptr x = std::get<1>(tree.QE[i]); - if (x.p == xm->p && x.q == xm->q && (v.g_T_weighted + calc_weighted_dist(v, *xm, conf.alpha) >= xm->g_T_weighted)) + if ((x == xm) && (v.g_T_weighted + calc_weighted_dist(v, *xm, conf.alpha) >= xm->g_T_weighted)) { auto it = tree.QE.begin() + i; *it = std::move(tree.QE.back()); @@ -670,12 +674,20 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo + + // FAULT AND EARLY MAIN LOOP EXIT HANDLING // If we make it here and are still localized, then try to reset the planner - if (localization_flag == true) - { - HardReset(robot_state, costmap_ptr); + //if (localization_flag == true) + //{ + //HardReset(robot_state, costmap_ptr); + + + + + + // The below code was all wrapped into HardReset, still experimental however. 
/* @@ -733,12 +745,16 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo Planning(robot_state, costmap_ptr); */ - } - else - { + + + + + //} + //else + //{ // But if we arent localized, then there is really nothing else we can do but return an error and end the planning session - CLOG(ERROR, "path_planning.cbit_planner") << "Localization Failed, Terminating the Planning Instance"; - } + // CLOG(ERROR, "path_planning.cbit_planner") << "Localization Failed, Terminating the Planning Instance"; + //} } // End of main planning function @@ -784,8 +800,8 @@ std::vector> CBITPlanner::SampleBox(int m) Node node(rand_p, rand_q); // TODO: Before we add the sample to the sample vector, we need to collision check it - //if (is_inside_obs(obs_rectangle, curve_to_euclid(node))) // legacy version with obstacle vectors - if (costmap_col(curve_to_euclid(node))) // Using costmap for collision check + if (is_inside_obs(obs_rectangle, curve_to_euclid(node))) // legacy version with obstacle vectors + //if (costmap_col(curve_to_euclid(node))) // Using costmap for collision check { continue; @@ -825,8 +841,8 @@ std::vector> CBITPlanner::SampleFreeSpace(int m) Node node(rand_p, rand_q); // Before we add the sample to the sample vector, we need to collision check it - //if (is_inside_obs(obs_rectangle, curve_to_euclid(node))) - if (costmap_col(curve_to_euclid(node))) + if (is_inside_obs(obs_rectangle, curve_to_euclid(node))) + //if (costmap_col(curve_to_euclid(node))) { continue; } @@ -847,7 +863,7 @@ std::vector> CBITPlanner::SampleFreeSpace(int m) // In the python version I do this line thing which is more robust, but for now im going to do this quick and dirty double p_step = 0.25; double p_val = p_zero; // needed to modify the starting i to be p_zero - for (int i = 0; i < (pre_seeds); i++) + for (int i = 0; i < (pre_seeds-1); i++) { Node node((p_val+p_step), 0); @@ -917,34 +933,27 @@ void CBITPlanner::Prune(double c_best, double c_best_weighted) } // We 
also check the tree and add samples for unconnected vertices back to the sample set - // paper says this is to maintain uniform sample density, but practically I dont see how any tree vertex would have infinite cost to come? + //We also do a prune of the vertex tree by heuristic //&& (samples[i]->g_T != INFINITY)) - for (int i = 0; i < tree.V.size(); i++) + std::vector> vertex_pruned; + int vertices_size = tree.V.size(); + for (int i = 0; i < vertices_size; i++) { if (tree.V[i]->g_T_weighted == INFINITY) { samples_pruned.push_back(std::shared_ptr (tree.V[i])); } - } - // After we do both the above loops update the sample vector - samples = samples_pruned; - - - - // Similar prune of the Vertex Tree - // TODO: Wormhole accomodations - - std::vector> vertex_pruned; - for (int i = 0; i g_T_weighted < INFINITY)) { vertex_pruned.push_back(std::shared_ptr (tree.V[i])); } } + // After we do both the above loops update the sample vector + samples = samples_pruned; tree.V = vertex_pruned; - + + // TODO: Wormhole accomodations // Similar Prune of the Edges // TODO:Wormhole accomodations @@ -1188,7 +1197,10 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) } } - // DEBUG + // I found the below code, while does follow the original algorithm, tends not to hurt compute performance + // yet allows the pre-seeded samples to much much better in subsequent iterations + // Removing this lets the tree use old verticies to find better paths more often (but subequently means you check more) + /* // First check to see that v is not in the old tree already, if it is return early for (int i = 0; i < tree.V_Old.size(); i++) { @@ -1198,7 +1210,8 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) return; } } - // END OF DEBUG + */ + // find nearby vertices and filter by heuristic potential for (int i = 0; i < tree.V.size(); i++) @@ -1208,6 +1221,15 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) //auto test3 = (v->g_T_weighted + calc_weighted_dist(*v, *tree.V[i], conf.alpha)); if 
((calc_dist(*tree.V[i], *v) <= conf.initial_exp_rad) && ((g_estimated_admissible(*v, *p_start) + calc_weighted_dist(*v, *tree.V[i], conf.alpha) + h_estimated_admissible(*tree.V[i], *p_goal)) < p_goal->g_T_weighted) && ((v->g_T_weighted + calc_weighted_dist(*v, *tree.V[i], conf.alpha)) < tree.V[i]->g_T_weighted)) { + if (edge_in_tree_v2(v, tree.V[i]) == false) + { + // If all conditions satisfied, add the edge to the queue + //tree.V[i]->g_T = INFINITY; // huh actually looking at this again, im not sure if I should be doing this (yeah I think was a mistake) + //tree.V[i]->g_T_weighted = INFINITY; // huh actually looking at this again, im not sure if I should be doing this (yeah I think its a mistake) + tree.QE.push_back(std::tuple, std::shared_ptr> {(v), (tree.V[i])}); + + } + /* // Also check whether the edge is in the tree or not if (edge_in_tree(*v, *tree.V[i]) == false) { @@ -1216,9 +1238,8 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) //tree.V[i]->g_T_weighted = INFINITY; //I think these two lines were a mistake, should not be here tree.QE.push_back(std::tuple, std::shared_ptr> {(v), (tree.V[i])}); - } - - } + */ + } } } @@ -1228,11 +1249,11 @@ std::tuple, std::shared_ptr> CBITPlanner::BestInEdge if (tree.QE.size() == 0) // need to handle a case where the return path is 100% optimal in which case things get stuck and need ot be flagged to break { //std::cout << "Edge Queue is Empty! 
Optimal Solution Found" << std::endl; - CLOG(WARNING, "path_planning.cbit_planner") << "Edge Queue is Empty, Solution Could Not be Improved This Batch"; - CLOG(WARNING, "path_planning.cbit_planner") << "Tree Size: " << tree.V.size() << " Vertex Queue Size: " << tree.QV.size() << " Sample Size: " <p << " q: " << tree.V[0]->q << " g_T_weighted: " << tree.V[0]->g_T_weighted; + CLOG(DEBUG, "path_planning.cbit_planner") << "Edge Queue is Empty, Solution Could Not be Improved This Batch"; + //CLOG(WARNING, "path_planning.cbit_planner") << "Tree Size: " << tree.V.size() << " Vertex Queue Size: " << tree.QV.size() << " Sample Size: " <p << " q: " << tree.V[0]->q << " g_T_weighted: " << tree.V[0]->g_T_weighted; - CLOG(WARNING, "path_planning.cbit_planner") << "Repair mode is: " << repair_mode; + //CLOG(WARNING, "path_planning.cbit_planner") << "Repair mode is: " << repair_mode; //CLOG(WARNING, "path_planning.cbit_planner") << "final sample is p: " << samples[samples.size()-1]->p; // note this can cause indexing errors in some cases @@ -1291,7 +1312,11 @@ std::tuple, std::vector> CBITPlanner::ExtractPath(vt { CLOG(DEBUG, "path_planning.cbit_planner") << "Something Went Wrong - Infinite Loop Detected, Initiating Hard Reset:"; //std::this_thread::sleep_for(std::chrono::milliseconds(2000)); //temp, remove this - HardReset(robot_state, costmap_ptr); + //HardReset(robot_state, costmap_ptr); + + + + /* CLOG(ERROR, "path_planning.cbit_planner") << "Node p:" << node.p << " q: " << node.q; CLOG(ERROR, "path_planning.cbit_planner") << "Node Parent p:" << (*node.parent).p << " q: " << (*node.parent).q; @@ -1434,8 +1459,8 @@ bool CBITPlanner::col_check_path() { Node vertex = *curv_path[i]; Node euclid_pt = curve_to_euclid(vertex); - //if (is_inside_obs(obs_rectangle, euclid_pt)) // Legacy collision checking - if (costmap_col(euclid_pt)) + if (is_inside_obs(obs_rectangle, euclid_pt)) // Legacy collision checking + //if (costmap_col(euclid_pt)) { if (collision == false) // Enter this 
statement only the first time { @@ -1575,7 +1600,6 @@ void CBITPlanner::restore_tree(double g_T_update, double g_T_weighted_update) // Function which takes in a beginning vertex v, and an end vertex x, and checks whether its in the tree or not already -// C++ sucks so this is actually very annoying to do bool CBITPlanner::edge_in_tree(Node v, Node x) { for (int i = 0; i < tree.E.size(); i++ ) @@ -1593,12 +1617,27 @@ bool CBITPlanner::edge_in_tree(Node v, Node x) */ } return false; - +} +// Function which takes in a beginning vertex v, and an end vertex x, and checks whether its in the tree or not already +// This version uses address matching (safer) +bool CBITPlanner::edge_in_tree_v2(std::shared_ptr v, std::shared_ptr x) +{ + int edges_size = tree.E.size(); + for (int i = 0; i < edges_size; i++ ) + { + // Alternative way to do this using node addresses + if ((std::get<0>(tree.E[i]) == v) && (std::get<1>(tree.E[i]) == x)) + { + return true; + } + } + return false; } + // Function for checking whether a node lives in the Vertex tree bool CBITPlanner::node_in_tree(Node x) { @@ -1614,6 +1653,22 @@ bool CBITPlanner::node_in_tree(Node x) } +// Function for checking whether a node lives in the Vertex tree, updated to use address matching (safer) +bool CBITPlanner::node_in_tree_v2(std::shared_ptr x) +{ + int vertices_size = tree.V.size(); + for (int i = 0; i < vertices_size; i++ ) + { + + if (x == tree.V[i]) + { + return true; + } + } + return false; + +} + // Edge cost calculation with collision check double CBITPlanner::cost_col(std::vector> obs, Node start, Node end) @@ -1695,18 +1750,20 @@ bool CBITPlanner::is_inside_obs(std::vector> obs, Node node) { for (int i = 0; i < obs.size(); i++) { - double x = obs[i][0]; - double y = obs[i][1]; - double w = obs[i][2]; - double h = obs[i][3]; - //bool test1 = (0 <= (node.p - x) <= w); - //bool test2 = ((0 <= (node.p -x)) && ((node.p -x) <= w) && (0 <= (node.p -x)) && ((node.p -x) <= h)); - //bool test3 = ((0 <= (node.p -x)) && 
((node.p -x) <= w)); - //bool test4 = ((0 <= (node.p -x)) && ((node.p -x) <= h)); - if ((0 <= (node.p - x)) && ((node.p - x) <= w) && (0 <= (node.q - y)) && ((node.q - y) <= h)) - { - return true; - } + + + double x = obs[i][0]; + double y = obs[i][1]; + double w = obs[i][2]; + double h = obs[i][3]; + //bool test1 = (0 <= (node.p - x) <= w); + //bool test2 = ((0 <= (node.p -x)) && ((node.p -x) <= w) && (0 <= (node.p -x)) && ((node.p -x) <= h)); + //bool test3 = ((0 <= (node.p -x)) && ((node.p -x) <= w)); + //bool test4 = ((0 <= (node.p -x)) && ((node.p -x) <= h)); + if ((0 <= (node.p - x)) && ((node.p - x) <= w) && (0 <= (node.q - y)) && ((node.q - y) <= h)) + { + return true; + } } return false; } @@ -1820,8 +1877,8 @@ bool CBITPlanner::discrete_collision(std::vector> obs, doubl // Convert to euclid TODO: //Node euclid_pt = curv_pt; // DEBUG DO NOT LEAVE THIS HERE, NEED TO REPLACE WITH COLLISION CHECK FUNCTION Node euclid_pt = curve_to_euclid(curv_pt); - //if (is_inside_obs(obs, euclid_pt)) - if (costmap_col(euclid_pt)) + if (is_inside_obs(obs, euclid_pt)) + //if (costmap_col(euclid_pt)) { return true; } diff --git a/main/src/vtr_path_planning/src/cbit/utils.cpp b/main/src/vtr_path_planning/src/cbit/utils.cpp index c39e45946..f64725810 100644 --- a/main/src/vtr_path_planning/src/cbit/utils.cpp +++ b/main/src/vtr_path_planning/src/cbit/utils.cpp @@ -216,10 +216,10 @@ double exp_radius(double q, double sample_box_height, double sample_box_width, d */ double d = 2; - double lambda_x = sample_box_height + sample_box_width; + double lambda_x = sample_box_height * sample_box_width; double zeta = M_PI; double radius = 2.0 * eta * (pow((1.0 + (1.0/d)),(1.0/d))) * (pow((lambda_x/zeta),0.5)) * (pow((log(q) / q),(1.0/d))); - std::cout << "Expansion Radius: " << radius << std::endl; + //std::cout << "Expansion Radius: " << radius << std::endl; return radius; } From 799116aa748f6fe31f30bf5bbce98f4ed5b3c06f Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Tue, 29 Nov 2022 
09:29:25 -0500 Subject: [PATCH 04/25] Refactoring and optimization of the obstacle free base planner, significant speed improvement --- .../include/vtr_path_planning/cbit/utils.hpp | 1 + .../src/cbit/cbit_path_planner.cpp | 164 ++++++++++++++++-- .../src/cbit/cbit_plotting.cpp | 2 +- 3 files changed, 149 insertions(+), 18 deletions(-) diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp index b21b1c09f..9c62b1c42 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp @@ -90,6 +90,7 @@ class Tree { std::vector> V_Old; std::vector> V_Repair_Backup; std::vector, std::shared_ptr>> E; + std::vector, std::shared_ptr>> E_Old; std::vector> QV; // using shared pointers std::vector, std::shared_ptr>> QE; Tree() = default; diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index 6b5be07ce..3ef666add 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -75,7 +75,7 @@ CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, dynamic_window_width = conf.sliding_window_width; // DEBUG PLOTTING - initialize_plot(); + //initialize_plot(); InitializePlanningSpace(); Planning(robot_state, costmap_ptr); @@ -239,6 +239,12 @@ void CBITPlanner::ResetPlanner() // Main planning function void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr) { + // find some better places to initialize these + double prev_path_cost = INFINITY; // experimental + int compute_time = 0; // exprimental + int vertex_rej_prob = 100; // experimental + int dyn_batch_samples = conf.batch_samples; + // Grab the amount of time in ms between robot state updates int control_period_ms = (1.0 
/ conf.state_update_freq) * 1000.0; auto state_update_time = std::chrono::high_resolution_clock::now() + std::chrono::milliseconds(control_period_ms); @@ -372,21 +378,65 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Add the new goal (robot state) to the samples so it can be found again + // EXPERIMENTAL - want to try flushing all the samples just before pushing the goal, then resampling locally in a radius around the new goal. + // It probably makes sense that the sample density in the radius ring is based on the rgg radius as well + //samples.clear(); samples.push_back(p_goal); + //double r_s; + //double theta_s; + //double x_s; + //double y_s; + //for (int i = 0; i < 100; i++) + //{ + // r_s = 2.0 * sqrt(static_cast (rand()) / static_cast (RAND_MAX)) ; // todo replace magic number 2.0 in this one (not the next) + // theta_s = (static_cast (rand()) / static_cast (RAND_MAX)) * 2.0 * 3.1415; + // x_s = r_s * cos(theta_s) + p_goal->p; + // y_s = r_s * sin(theta_s) + p_goal->q; + // Node new_sample(x_s, y_s); + // samples.push_back(std::make_shared (new_sample)); + // CLOG(DEBUG, "path_planning.cbit_planner") << "Sample x: " << x_s << " Sample y: " << y_s; + //} // Find vertices in the tree which are close to the new state, then populate the vertex queue with only these values. 
tree.QV.clear(); tree.QE.clear(); + + // EXPERIMENTAL, only take the closest point in the tree and use this to connect to the new robot state + /* + //int nearby_sample_counter = 0; + double closest_sample_dist = INFINITY; + int closest_sample_ind; + double sample_dist; for (int i = 0; i < tree.V.size(); i++) { - if (calc_dist(*(tree.V[i]), *p_goal) <= 2.0 ) // TODO: replace magic number with a param, represents radius to search for state update rewires + sample_dist = calc_dist(*(tree.V[i]), *p_goal); + if ((sample_dist <= closest_sample_dist) && ((tree.V[i])->p > (p_goal->p + (conf.initial_exp_rad/2)))) // TODO: replace magic number with a param, represents radius to search for state update rewires { - tree.QV.push_back(tree.V[i]); + closest_sample_dist = sample_dist; // used when only taking closest point in the tree to consider + closest_sample_ind = i; // used when only taking closest point in the tree to consider + //tree.QV.push_back(tree.V[i]); // comment out when only taking closest point in the tree to consider } } + tree.QV.push_back(tree.V[closest_sample_ind]); // used when only taking closest point in the tree to consider + */ + + // Alternative: Take all points in a radius of 2.0m around the new robot state (only if they are a ahead though) + double sample_dist; + for (int i = 0; i < tree.V.size(); i++) + { + sample_dist = calc_dist(*(tree.V[i]), *p_goal); + if ((sample_dist <= 2.0) && ((tree.V[i])->p > (p_goal->p + (conf.initial_exp_rad/2)))) // TODO: replace magic number with a param, represents radius to search for state update rewires + { + tree.QV.push_back(tree.V[i]); // comment out when only taking closest point in the tree to consider + } + } + + + - CLOG(DEBUG, "path_planning.cbit_planner") << "Robot State Updated Successfully"; - //CLOG(INFO, "path_planning.cbit_planner") << "QV size: " << tree.QV.size(); + CLOG(DEBUG, "path_planning.cbit_planner") << "Robot State Updated Successfully, p: " << p_goal->p << " q: " << p_goal->q; + 
//CLOG(DEBUG, "path_planning.cbit_planner") << "Nearest Vertex, p: " << tree.V[closest_sample_ind]->p << " q: " << tree.V[closest_sample_ind]->q; + CLOG(DEBUG, "path_planning.cbit_planner") << "QV size: " << tree.QV.size(); // When the planner resumes, this will cause it to immediately try to rewire locally to the new robot state in a short amount of time @@ -422,11 +472,47 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo auto duration = std::chrono::duration_cast(stop_time - start_time); //std::cout << "Batch Compute Time (ms): " << duration.count() << std::endl; + compute_time = static_cast (duration.count()); // Debug message for online use (wont use std::out long term) CLOG(INFO, "path_planning.cbit_planner") << "New Batch - Iteration: " << k << " Path Cost: " << p_goal->g_T_weighted << " Batch Compute Time (ms): " << duration.count(); CLOG(INFO, "path_planning.cbit_planner") << "Tree Vertex Size: " << tree.V.size() << " Tree Edge Size: " << tree.E.size() << " Sample Size: " << samples.size(); + // If the solution does not improve, backup the tree to restrict unnecessary growth + if (abs(p_goal->g_T_weighted - prev_path_cost) < 1e-6) + { + CLOG(DEBUG, "path_planning.cbit_planner") << "There Was No Path Improvement, Restoring Tree to Previous Batch To Restrict Growth."; + tree.V = tree.V_Old; + tree.E = tree.E_Old; + } + prev_path_cost = p_goal->g_T_weighted; + + // EXPERIMENTAL + // if the batch compute time starts to creep up, it means the tree is getting abit to bloated + // Its likely that more samples will not really help us much while going around obstacles, so we should reduce the batch size + if (compute_time >= 200) + { + if (dyn_batch_samples >= 10) // cant have negative samples + { + dyn_batch_samples = dyn_batch_samples - 10; + // Also increase the probability of rejecting vertex queue vertices to improve performance + vertex_rej_prob = vertex_rej_prob - 10; + CLOG(DEBUG, "path_planning.cbit_planner") << "Compute 
Falling Behind, Reducing Batch Size To " << m << " and Decreasing Vertex Acceptance Probability to " << vertex_rej_prob << "%"; + } + } + // Conversely if batch compute time is fast, we can handle more samples (up to a max) + else + { + if (dyn_batch_samples < conf.batch_samples) + { + vertex_rej_prob = vertex_rej_prob + 2; + dyn_batch_samples = dyn_batch_samples + 2; + } + } + m = dyn_batch_samples; + + + // Extract the solution //std::cout << "Made it just before extractpath" << std::endl; std::tuple, std::vector> curv_path = ExtractPath(robot_state, costmap_ptr); @@ -444,7 +530,12 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo //std::cout << "Made it to end of new batch process code" << std::endl; //DEBUG PLOTTING - plot_tree(tree, *p_goal, path_x, path_y, samples); + //auto plot_start_time = std::chrono::high_resolution_clock::now(); + //plot_tree(tree, *p_goal, path_x, path_y, samples); + //auto plot_stop_time = std::chrono::high_resolution_clock::now(); + //auto duration_plot = std::chrono::duration_cast(plot_stop_time - plot_start_time); + //CLOG(ERROR, "path_planning.cbit_planner") << "Plot Time: " << duration_plot.count() << "ms"; + } @@ -516,22 +607,55 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo } - - // Backup the old tree: (Not using this anymore) - /* + + // Backup the old tree: + tree.V_Old.clear(); + tree.E_Old.clear(); for (int i = 0; i < tree.V.size(); i++) { - tree.V_Old.push_back(std::shared_ptr (tree.V[i])); + tree.V_Old.push_back((tree.V[i])); + } + + for (int i = 0; i < tree.E.size(); i++) + { + tree.E_Old.push_back(tree.E[i]); } - */ - // Initialize the Vertex Queue; - //tree.QV = tree.V; // C++ is dumb so this doesnt work with shared pointer vectors, need to do the following instead + // Initialize the Vertex Queue with all nearby vertices in the tree; + //tree.QV = tree.V; // C++ is dumb so this doesnt work with shared pointer vectors, need to do the following 
instead + //bool reject_vertex = false; + std::random_device rd; // Only used once to initialise (seed) engine + std::mt19937 rng(rd()); // Random-number engine used (Mersenne-Twister in this case) + std::uniform_int_distribution uni(1,100); // Guaranteed unbiased for (int i = 0; i < tree.V.size(); i++) { - tree.QV.push_back(std::shared_ptr (tree.V[i])); + // TODO: It may also be worth considering only adding a random subset of the vertex tree to the queue in times when batches are taking a long time + //if (compute_time >= 200) + //{ + auto random_integer = uni(rng); + random_integer = static_cast (random_integer); + //reject_vertex = static_cast (random_integer); + //CLOG(DEBUG, "path_planning.cbit_planner") << "Compute Falling Behind, Vertices, reject vertex: " << reject_vertex; + //} + //else + //{ + // reject_vertex = false; + //} + + // if we have no 1st batch solution (we are in the first iteration or have just reset), add the whole tree to the queue + if (k == 0) + { + tree.QV.push_back(tree.V[i]); + } + // Otherwise, only add the portions of the tree within the sliding window to avoid processing preseeded vertices which are already optimal + else if (((tree.V[i]->p) <= p_goal->p + dynamic_window_width + conf.sliding_window_freespace_padding) && ((vertex_rej_prob / random_integer) >= 1.0)) + { + tree.QV.push_back(tree.V[i]); + } + } + CLOG(DEBUG, "path_planning.cbit_planner") << "QV size after batch: " << tree.QV.size(); // TODO: Dynamic Expansion radius selection: // Its debatable whether this works well or not, but going to try it @@ -774,7 +898,7 @@ std::vector> CBITPlanner::SampleBox(int m) double c_min = p_start->p - p_goal->p; double padding = (c_best - c_min) / 2.0; // Padding to apply to maintain the heuristic ellipse additional width for probabilstic completeness double lookahead = dynamic_window_width; - double box_tolerance = 0.1; // Need to add a little extra height to the box when using ROC regions + double box_tolerance = 0.1; // Need to add a 
little extra height to the box when using ROC regions TODO make this config param // Calculate dimensions double q_max = *std::max_element(path_y.begin(), path_y.end()) + box_tolerance; // apparently this function can be abit slow, so be aware @@ -782,7 +906,13 @@ std::vector> CBITPlanner::SampleBox(int m) q_max = std::max(fabs(q_max), fabs(q_min)); - double p_max = p_goal->p + lookahead - padding; + // maintain a minimum sample box height, this prevents sample density from becoming too ridiculous when following the path + if (q_max < 0.5) // TODO make this config param + { + q_max = 0.5; + } + + double p_max = p_goal->p + lookahead + padding; double p_zero = p_goal->p - padding; //std::cout << "p_max: " << p_max << std::endl; @@ -1204,7 +1334,7 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) // First check to see that v is not in the old tree already, if it is return early for (int i = 0; i < tree.V_Old.size(); i++) { - if ((v->p == tree.V_Old[i]->p) && (v->q == tree.V_Old[i]->q)) + if (v == tree.V_Old[i]) { //CLOG(WARNING, "path_planning.cbit_planner") << "Returning early, expansion vertex is in V_old"; // For debug return; diff --git a/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp b/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp index 255163954..413489b3f 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp @@ -91,7 +91,7 @@ void plot_tree(Tree tree, Node robot_pq, std::vector path_p, std::vector // Display and show while continuing script (adds 1ms delay to draw) - matplotlibcpp::draw(); + //matplotlibcpp::draw(); matplotlibcpp::pause(0.001); return; } From d0a7ef6c7383566a65f0c7da8bd492d95c00d855 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Tue, 29 Nov 2022 15:30:41 -0500 Subject: [PATCH 05/25] overhauled how obstacles are handled in the planner, significantly faster and more stable now --- .../cbit/cbit_path_planner.hpp | 1 + .../src/cbit/cbit_path_planner.cpp | 
279 +++++++++++------- 2 files changed, 170 insertions(+), 110 deletions(-) diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index 5cdcac0ff..2a3274b11 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -120,6 +120,7 @@ class CBITPlanner { bool costmap_col(Node node); bool discrete_collision(std::vector> obs, double discretization, Node start, Node end); bool col_check_path(); + std::shared_ptr col_check_path_v2(); void restore_tree(double g_T_update, double g_T_weighted_update); // Add class for Tree diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index 3ef666add..8b862a2e2 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -278,7 +278,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Check whether a robot state update should be applied // We only update the state if A: we have first found a valid initial solution, and B: if the current time has elapsed the control period - if (conf.update_state == true && repair_mode == false) + if (conf.update_state == true) { if ((p_goal->parent != nullptr) && (std::chrono::high_resolution_clock::now() >= state_update_time)) { @@ -345,8 +345,12 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Perform a state update to convert the actual robot position to its corresponding pq space: p_goal = UpdateState(); + //TODO: It could be useful to convert this p_goal back to euclid and compare with se3_robot_pose to verify the conversion worked properly (error should be very low) + // If its not, we probably need to handle it or return an error 
+ // EXPERIMENTAL TODO: If robot pose nears the goal (p_start), exit the planner with the current solution + // Note, not sure if this is super easy, because I think if the planner exits the vt&r code will immediately relaunch it //CLOG(ERROR, "path_planning") << "Trying the distance to goal is: " << abs(p_goal->p - p_start->p); //if (abs(p_goal->p - p_start->p) <= 2.0) //{ @@ -355,11 +359,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo //} // If we arent in repair mode, update the backup goal - - if (repair_mode == false) - { - p_goal_backup = p_goal; - } + // There is a case where the robot is actually starting behind the world frame of the teach path (or the localization thinks it is at first anyways) @@ -462,127 +462,153 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo if (p_goal->parent != nullptr) { - if (repair_mode==false) - { - //std::cout << "Iteration: " << k << std::endl; - //std::cout << "Path Cost: " << p_goal->g_T_weighted << std::endl; + //std::cout << "Iteration: " << k << std::endl; + //std::cout << "Path Cost: " << p_goal->g_T_weighted << std::endl; + + // Benchmark current compute time + auto stop_time = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(stop_time - start_time); + //std::cout << "Batch Compute Time (ms): " << duration.count() << std::endl; - // Benchmark current compute time - auto stop_time = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(stop_time - start_time); - //std::cout << "Batch Compute Time (ms): " << duration.count() << std::endl; + compute_time = static_cast (duration.count()); - compute_time = static_cast (duration.count()); + // Debug message for online use (wont use std::out long term) + CLOG(INFO, "path_planning.cbit_planner") << "New Batch - Iteration: " << k << " Path Cost: " << p_goal->g_T_weighted << " Batch Compute Time (ms): " << duration.count(); + 
CLOG(INFO, "path_planning.cbit_planner") << "Tree Vertex Size: " << tree.V.size() << " Tree Edge Size: " << tree.E.size() << " Sample Size: " << samples.size(); - // Debug message for online use (wont use std::out long term) - CLOG(INFO, "path_planning.cbit_planner") << "New Batch - Iteration: " << k << " Path Cost: " << p_goal->g_T_weighted << " Batch Compute Time (ms): " << duration.count(); - CLOG(INFO, "path_planning.cbit_planner") << "Tree Vertex Size: " << tree.V.size() << " Tree Edge Size: " << tree.E.size() << " Sample Size: " << samples.size(); + // If the solution does not improve, backup the tree to restrict unnecessary growth + if (abs(p_goal->g_T_weighted - prev_path_cost) < 1e-6) + { + CLOG(DEBUG, "path_planning.cbit_planner") << "There Was No Path Improvement, Restoring Tree to Previous Batch To Restrict Growth."; + tree.V = tree.V_Old; + tree.E = tree.E_Old; + } + prev_path_cost = p_goal->g_T_weighted; - // If the solution does not improve, backup the tree to restrict unnecessary growth - if (abs(p_goal->g_T_weighted - prev_path_cost) < 1e-6) + // EXPERIMENTAL + // if the batch compute time starts to creep up, it means the tree is getting abit to bloated + // Its likely that more samples will not really help us much while going around obstacles, so we should reduce the batch size + if (compute_time >= 200) + { + if (vertex_rej_prob > 60) // never want to exceed some minimums { - CLOG(DEBUG, "path_planning.cbit_planner") << "There Was No Path Improvement, Restoring Tree to Previous Batch To Restrict Growth."; - tree.V = tree.V_Old; - tree.E = tree.E_Old; + dyn_batch_samples = dyn_batch_samples - 10; + // Also increase the probability of rejecting vertex queue vertices to improve performance + vertex_rej_prob = vertex_rej_prob - 10; + CLOG(DEBUG, "path_planning.cbit_planner") << "Compute Falling Behind, Reducing Batch Size To " << dyn_batch_samples << " and Decreasing Vertex Acceptance Probability to " << vertex_rej_prob << "%"; } - prev_path_cost = 
p_goal->g_T_weighted; + } + // Conversely if batch compute time is fast, we can handle more samples (up to a max) + else + { + if (vertex_rej_prob < 100) + { + vertex_rej_prob = vertex_rej_prob + 2; + dyn_batch_samples = dyn_batch_samples + 2; + CLOG(DEBUG, "path_planning.cbit_planner") << "Compute Catching Up, Increasing Batch Size To " << dyn_batch_samples << " and Decreasing Vertex Acceptance Probability to " << vertex_rej_prob << "%"; + } + } + m = dyn_batch_samples; + + // Extract the solution + //std::cout << "Made it just before extractpath" << std::endl; + std::tuple, std::vector> curv_path = ExtractPath(robot_state, costmap_ptr); + path_x = std::get<0>(curv_path); // p coordinates of the current path (I should probably rename, this is misleading its not x and y) + path_y = std::get<1>(curv_path); // q coordinates of the current path (I should probably rename, this is misleading its not x and y) + + + // Store the Euclidean solution in the shared pointer memory (vector of Pose classes) so it can be accessed in the CBIT class + //std::cout << "Made it just before extracting euclid path" << std::endl; + std::vector euclid_path = ExtractEuclidPath(); + *cbit_path_ptr = euclid_path; + + // Reset the start time + start_time = std::chrono::high_resolution_clock::now(); + //std::cout << "Made it to end of new batch process code" << std::endl; + + //DEBUG PLOTTING + //auto plot_start_time = std::chrono::high_resolution_clock::now(); + //plot_tree(tree, *p_goal, path_x, path_y, samples); + //auto plot_stop_time = std::chrono::high_resolution_clock::now(); + //auto duration_plot = std::chrono::duration_cast(plot_stop_time - plot_start_time); + //CLOG(ERROR, "path_planning.cbit_planner") << "Plot Time: " << duration_plot.count() << "ms"; + + - // EXPERIMENTAL - // if the batch compute time starts to creep up, it means the tree is getting abit to bloated - // Its likely that more samples will not really help us much while going around obstacles, so we should reduce the 
batch size - if (compute_time >= 200) + + // First grab the latest obstacles (could potentially take this opportunity to make a more compact approximate obs representation) + + // Collision Check the batch solution: TODO:, will need to make wormhole modifcations long term + std::shared_ptr col_free_vertex = col_check_path_v2(); // outputs NULL if no collision + if (col_free_vertex != nullptr) + { + // If there is a collision, prune the tree of all vertices to the left of the this vertex + CLOG(WARNING, "path_planning.cbit_planner") << "Collision Detected:"; + CLOG(WARNING, "path_planning.cbit_planner") << "Collision Free Vertex is - p: " << col_free_vertex->p << " q: " << col_free_vertex->q; + + // Vertex Prune (maintain only vertices to the right of the collision free vertex) + std::vector> pruned_vertex_tree; + pruned_vertex_tree.reserve(tree.V.size()); + for (int i =0; i= 10) // cant have negative samples + if (tree.V[i]->p >= col_free_vertex->p) { - dyn_batch_samples = dyn_batch_samples - 10; - // Also increase the probability of rejecting vertex queue vertices to improve performance - vertex_rej_prob = vertex_rej_prob - 10; - CLOG(DEBUG, "path_planning.cbit_planner") << "Compute Falling Behind, Reducing Batch Size To " << m << " and Decreasing Vertex Acceptance Probability to " << vertex_rej_prob << "%"; + pruned_vertex_tree.push_back(tree.V[i]); } } - // Conversely if batch compute time is fast, we can handle more samples (up to a max) - else + tree.V = pruned_vertex_tree; + + // Edge Prune (meaintain only edges to the right of the collision free vertex) + std::vector, std::shared_ptr>> pruned_edge_tree; + pruned_edge_tree.reserve(tree.E.size()); + for (int i = 0; i (tree.E[i])->p >= col_free_vertex->p) { - vertex_rej_prob = vertex_rej_prob + 2; - dyn_batch_samples = dyn_batch_samples + 2; + pruned_edge_tree.push_back(tree.E[i]); } } - m = dyn_batch_samples; - - - - // Extract the solution - //std::cout << "Made it just before extractpath" << std::endl; - 
std::tuple, std::vector> curv_path = ExtractPath(robot_state, costmap_ptr); - path_x = std::get<0>(curv_path); // p coordinates of the current path (I should probably rename, this is misleading its not x and y) - path_y = std::get<1>(curv_path); // q coordinates of the current path (I should probably rename, this is misleading its not x and y) + + tree.E = pruned_edge_tree; - // Store the Euclidean solution in the shared pointer memory (vector of Pose classes) so it can be accessed in the CBIT class - //std::cout << "Made it just before extracting euclid path" << std::endl; - std::vector euclid_path = ExtractEuclidPath(); - *cbit_path_ptr = euclid_path; + // Reset the goal, and add it to the samples + p_goal->parent = nullptr; + p_goal->g_T = INFINITY; + p_goal->g_T_weighted = INFINITY; + samples.clear(); // first clear samples so they dont accumulate excessively + samples.push_back(p_goal); - // Reset the start time - start_time = std::chrono::high_resolution_clock::now(); - //std::cout << "Made it to end of new batch process code" << std::endl; + // This will cause samplefreespace to run this iteration, but if we made it here m will be set to conf.batch_samples (as its reset every iteration) + m = conf.initial_samples; + //k=0; - //DEBUG PLOTTING - //auto plot_start_time = std::chrono::high_resolution_clock::now(); - //plot_tree(tree, *p_goal, path_x, path_y, samples); - //auto plot_stop_time = std::chrono::high_resolution_clock::now(); - //auto duration_plot = std::chrono::duration_cast(plot_stop_time - plot_start_time); - //CLOG(ERROR, "path_planning.cbit_planner") << "Plot Time: " << duration_plot.count() << "ms"; - + // Re-initialize sliding window dimensions for plotting and radius expansion calc; + sample_box_height = conf.q_max * 2.0; + sample_box_width = conf.sliding_window_width + 2 * conf.sliding_window_freespace_padding; - } + // Disable pre-seeds from being regenerated + repair_mode = true; - - // Check if we are in repair mode, if we are and we reach 
this point, it means we will have just successfully rewired - // Now we need to try to recover the old tree and update all cost to come values - if (repair_mode==true) - { - double g_T_update = p_goal->g_T - repair_g_T_old; - double g_T_weighted_update = p_goal->g_T_weighted - repair_g_T_weighted_old; - - p_goal = p_goal_backup; - //CLOG(ERROR, "path_planning.cbit_planner") << "The p_goal is now set to p: " << p_goal->p << " q: " << p_goal->q; - try // experimental debug - { - restore_tree(g_T_update, g_T_weighted_update); - } - catch(...) + // Sample free-space wont generate pre-seeds, so we should generate our own in the portion of the tree that was dropped (col_free_vertex.p to p_goal.p) + int pre_seeds = abs(col_free_vertex->p - p_goal->p) / 0.25; // Note needed to change p_goal to p_zero. When the sliding window padding is large, pre-seeds wont get generated all the way to the goal + double p_step = 0.25; + double p_val = p_goal->p; + for (int i = 0; i < (pre_seeds-1); i++) { - CLOG(ERROR, "path_planning.cbit_planner") << "Failed to restore the tree, initiating hard reset."; + Node node((p_val+p_step), 0); + samples.push_back(std::make_shared (node)); + p_val = p_val + p_step; } - - - // Reset the goal (NOTE IN PYTHON I DO THIS AFTER REPAIR, BUT I THINK WITH MY QUICK AND DIRTY REPAIR I NEED TO SET IT BEFORE) - //p_goal = p_goal_backup; + } + else + { + // Reset the free space flag in sample freespace + CLOG(WARNING, "path_planning.cbit_planner") << "No Collision:"; repair_mode = false; - - tree.QV.clear(); - tree.QE.clear(); - samples.clear(); - - CLOG(INFO, "path_planning.cbit_planner") << "REPAIR MODE COMPLETED SUCCESSFULLY"; - CLOG(INFO, "path_planning.cbit_planner") << "The p_goal is now set to p: " << p_goal->p << " q: " << p_goal->q; - continue; } - - // After restoring the tree, (or finding a solution for a batch) we need to collision check the path to see if we need to go into repair mode - - repair_mode = col_check_path(); - - // DEBUG, dont leave 
this! bipasses tree restore and repair mode - //if (repair_mode) - //{ - // HardReset(robot_state, costmap_ptr); - //} - + } //std::cout << "Made it just before prune" << std::endl; Prune(p_goal->g_T, p_goal->g_T_weighted); @@ -628,6 +654,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo std::random_device rd; // Only used once to initialise (seed) engine std::mt19937 rng(rd()); // Random-number engine used (Mersenne-Twister in this case) std::uniform_int_distribution uni(1,100); // Guaranteed unbiased + //CLOG(ERROR, "path_planning.cbit_planner") << "Vertex Lookahead: "<< p_goal->p + dynamic_window_width + conf.sliding_window_freespace_padding+5; for (int i = 0; i < tree.V.size(); i++) { // TODO: It may also be worth considering only adding a random subset of the vertex tree to the queue in times when batches are taking a long time @@ -649,7 +676,8 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo tree.QV.push_back(tree.V[i]); } // Otherwise, only add the portions of the tree within the sliding window to avoid processing preseeded vertices which are already optimal - else if (((tree.V[i]->p) <= p_goal->p + dynamic_window_width + conf.sliding_window_freespace_padding) && ((vertex_rej_prob / random_integer) >= 1.0)) + else if (((tree.V[i]->p) <= p_goal->p + dynamic_window_width + (conf.sliding_window_freespace_padding*2)) && ((vertex_rej_prob / random_integer) >= 1.0)) + //else if (((vertex_rej_prob / random_integer) >= 1.0)) // for some reason using the lookahead queue doesnt work reliably for collisions, not sure why, need to investigate { tree.QV.push_back(tree.V[i]); } @@ -930,8 +958,8 @@ std::vector> CBITPlanner::SampleBox(int m) Node node(rand_p, rand_q); // TODO: Before we add the sample to the sample vector, we need to collision check it - if (is_inside_obs(obs_rectangle, curve_to_euclid(node))) // legacy version with obstacle vectors - //if (costmap_col(curve_to_euclid(node))) // Using 
costmap for collision check + //if (is_inside_obs(obs_rectangle, curve_to_euclid(node))) // legacy version with obstacle vectors + if (costmap_col(curve_to_euclid(node))) // Using costmap for collision check { continue; @@ -971,8 +999,8 @@ std::vector> CBITPlanner::SampleFreeSpace(int m) Node node(rand_p, rand_q); // Before we add the sample to the sample vector, we need to collision check it - if (is_inside_obs(obs_rectangle, curve_to_euclid(node))) - //if (costmap_col(curve_to_euclid(node))) + //if (is_inside_obs(obs_rectangle, curve_to_euclid(node))) + if (costmap_col(curve_to_euclid(node))) { continue; } @@ -1589,8 +1617,8 @@ bool CBITPlanner::col_check_path() { Node vertex = *curv_path[i]; Node euclid_pt = curve_to_euclid(vertex); - if (is_inside_obs(obs_rectangle, euclid_pt)) // Legacy collision checking - //if (costmap_col(euclid_pt)) + //if (is_inside_obs(obs_rectangle, euclid_pt)) // Legacy collision checking + if (costmap_col(euclid_pt)) { if (collision == false) // Enter this statement only the first time { @@ -1688,6 +1716,37 @@ bool CBITPlanner::col_check_path() } +std::shared_ptr CBITPlanner::col_check_path_v2() +{ + + // Generate path to collision check + std::vector> curv_path; + std::shared_ptr node_ptr = p_goal; + curv_path.push_back(node_ptr); + + while ((node_ptr->parent != nullptr)) + { + node_ptr = node_ptr->parent; + curv_path.push_back(node_ptr); + } + + // find the first vertex which results in a collision (curv_path is generated from left to right, so we should search in reverse) + std::shared_ptr col_free_vertex = nullptr; + + // TODO, actually we probably only want to consider edges in the lookahead (not way far away down the path) but I might deal with this later + for (int i = curv_path.size()-1; i>=0; i--) + { + Node vertex = *curv_path[i]; + Node euclid_pt = curve_to_euclid(vertex); + //if (is_inside_obs(obs_rectangle, euclid_pt)) // Legacy collision checking + if (costmap_col(euclid_pt)) + { + col_free_vertex = curv_path[i+1]; // 
take the vertex just before the collision vertex + } + } + return col_free_vertex; +} + void CBITPlanner::restore_tree(double g_T_update, double g_T_weighted_update) @@ -2007,8 +2066,8 @@ bool CBITPlanner::discrete_collision(std::vector> obs, doubl // Convert to euclid TODO: //Node euclid_pt = curv_pt; // DEBUG DO NOT LEAVE THIS HERE, NEED TO REPLACE WITH COLLISION CHECK FUNCTION Node euclid_pt = curve_to_euclid(curv_pt); - if (is_inside_obs(obs, euclid_pt)) - //if (costmap_col(euclid_pt)) + //if (is_inside_obs(obs, euclid_pt)) + if (costmap_col(euclid_pt)) { return true; } From 874e3c64dddae80c64db1a54d69bf06951228057 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Thu, 22 Dec 2022 05:29:24 -0500 Subject: [PATCH 06/25] Major planner revisions for speed-up and increased reliability --- config/honeycomb_grizzly_default.yaml | 6 +- .../include/vtr_lidar/data_types/costmap.hpp | 4 + .../planning/change_detection_module_v3.hpp | 5 + main/src/vtr_lidar/src/data_types/costmap.cpp | 15 ++ .../planning/change_detection_module_v3.cpp | 216 +++++++++++++++++- main/src/vtr_lidar/src/path_planning/cbit.cpp | 12 +- .../cbit/cbit_path_planner.hpp | 1 + .../src/cbit/cbit_path_planner.cpp | 187 +++++++++++++-- rviz/honeycomb.rviz | 93 ++++---- 9 files changed, 473 insertions(+), 66 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index ccfdffe1b..353bc7c84 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -435,13 +435,13 @@ sliding_window_width: 10.0 sliding_window_freespace_padding: 0.5 corridor_resolution: 0.05 - state_update_freq: 0.5 + state_update_freq: 1.0 update_state: true rand_seed: 1 # Planner Tuning Params - initial_samples: 250 - batch_samples: 150 + initial_samples: 350 + batch_samples: 100 pre_seed_resolution: 0.5 alpha: 0.5 q_max: 2.5 diff --git a/main/src/vtr_lidar/include/vtr_lidar/data_types/costmap.hpp 
b/main/src/vtr_lidar/include/vtr_lidar/data_types/costmap.hpp index 4971ec185..cecdb74c8 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/data_types/costmap.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/data_types/costmap.hpp @@ -139,6 +139,10 @@ class DenseCostMap : public BaseCostMap { /** \brief update from a sparse cost map */ void update(const std::unordered_map& values); + // Modification by Jordy, DEBUG + /** \brief update from a standard unordered_map */ + void update(const std::unordered_map, float> values); + XY2ValueMap filter(const float& threshold) const override; /** \brief Returns content of this class as a OccupancyGrid message. */ diff --git a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v3.hpp b/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v3.hpp index bf1b88564..0da27e9ea 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v3.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v3.hpp @@ -88,9 +88,14 @@ class ChangeDetectionModuleV3 : public tactic::BaseModule { bool publisher_initialized_ = false; rclcpp::Publisher::SharedPtr scan_pub_; rclcpp::Publisher::SharedPtr costmap_pub_; + rclcpp::Publisher::SharedPtr filtered_costmap_pub_ ; rclcpp::Publisher::SharedPtr costpcd_pub_; VTR_REGISTER_MODULE_DEC_TYPE(ChangeDetectionModuleV3); + + + // Modificatons for Temporal costmap filter + std::vector, float>> costmap_history; }; } // namespace lidar diff --git a/main/src/vtr_lidar/src/data_types/costmap.cpp b/main/src/vtr_lidar/src/data_types/costmap.cpp index 63fba3813..e37cc3625 100644 --- a/main/src/vtr_lidar/src/data_types/costmap.cpp +++ b/main/src/vtr_lidar/src/data_types/costmap.cpp @@ -134,6 +134,21 @@ void DenseCostMap::update( } } +// Modification by Jordy for updating dense maps from unordered maps with pairs of points () +void DenseCostMap::update( + const std::unordered_map, float> values) { + for 
(const auto val : values) { + const auto shifted_k = costmap::PixKey(val.first.first / dl_, val.first.second / dl_) - origin_; + //const auto shifted_k_x = val.first.first;// - origin_.x; + //const auto shifted_k_y = val.first.second;// - origin_.y; + //CLOG(INFO, "obstacle_detection.cbit") << "shifted_k_x" << shifted_k_x; + //CLOG(INFO, "obstacle_detection.cbit") << "shifted_k_y" << shifted_k_y; + //CLOG(INFO, "obstacle_detection.cbit") << "origin_x" << origin_.x; + //CLOG(INFO, "obstacle_detection.cbit") << "origin_y" << origin_.y; + values_(shifted_k.x, shifted_k.y) = val.second; + } +} + auto DenseCostMap::filter(const float& threshold) const -> XY2ValueMap { XY2ValueMap filtered; filtered.reserve(values_.size()); diff --git a/main/src/vtr_lidar/src/modules/planning/change_detection_module_v3.cpp b/main/src/vtr_lidar/src/modules/planning/change_detection_module_v3.cpp index 2b776066f..bd0b483c6 100644 --- a/main/src/vtr_lidar/src/modules/planning/change_detection_module_v3.cpp +++ b/main/src/vtr_lidar/src/modules/planning/change_detection_module_v3.cpp @@ -134,6 +134,7 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, // clang-format off scan_pub_ = qdata.node->create_publisher("change_detection_scan", 5); costmap_pub_ = qdata.node->create_publisher("change_detection_costmap", 5); + filtered_costmap_pub_ = qdata.node->create_publisher("filtered_change_detection_costmap", 5); costpcd_pub_ = qdata.node->create_publisher("change_detection_costpcd", 5); // clang-format on publisher_initialized_ = true; @@ -307,6 +308,209 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, costmap->vertex_id() = vid_loc; costmap->vertex_sid() = sid_loc; + + + + + // Jordy Modifications for temporal costmap filtering (UNDER DEVELOPMENT) + + + /**/ + // declaration of the final costmap which we are outputting + auto dense_costmap = std::make_shared(config_->resolution, config_->size_x, config_->size_y); + 
dense_costmap->T_vertex_this() = tactic::EdgeTransform(true); + dense_costmap->vertex_id() = vid_loc; + dense_costmap->vertex_sid() = sid_loc; + + // Create a sparse costmap and store it in a sliding window history + const auto sparse_costmap = std::make_shared(config_->resolution, config_->size_x, config_->size_y); + sparse_costmap->update(detect_change_op); + // add transform to the localization vertex + sparse_costmap->T_vertex_this() = tactic::EdgeTransform(true); + sparse_costmap->vertex_id() = vid_loc; + sparse_costmap->vertex_sid() = sid_loc; + + // Get the localization chain transform (lets us transform from costmap frame to world frame): + auto& chain = *output.chain; + auto T_w_c = chain.pose(sid_loc); + auto T_c_w = T_w_c.inverse(); + CLOG(WARNING, "obstacle_detection.cbit") << "T_w_c: " << T_w_c; // debug + + // Initialize an unordered map to store the sparse world obstacle representations + std::unordered_map, float> sparse_world_map; + + // Filter non-obstacles + vtr::lidar::BaseCostMap::XY2ValueMap sparse_obs_map = sparse_costmap->filter(0.01); + + // Iterate through the key value pairs, convert to a world frame unordered_map + std::vector> keys; + keys.reserve(sparse_obs_map.size()); + std::vector vals; + vals.reserve(sparse_obs_map.size()); + for(auto kv : sparse_obs_map) + { + float key_x = kv.first.first; + float key_y = kv.first.second; + // Convert the centre of each obstacle key value pair into the world frame + Eigen::Matrix grid_pt({key_x+(config_->resolution/2), key_y+(config_->resolution/2), 0.0, 1}); + auto collision_pt = T_w_c * grid_pt; + + //CLOG(DEBUG, "obstacle_detection.cbit") << "Displaying Transform: " << (sparse_costmap->T_vertex_this().inverse()); + + float world_key_x = floor(collision_pt[0] / config_->resolution) * config_->resolution; + float world_key_y = floor(collision_pt[1] / config_->resolution) * config_->resolution; + float world_value = kv.second; + std::pair world_keys(world_key_x, world_key_y); + 
sparse_world_map.insert(std::make_pair(world_keys,world_value)); + + //CLOG(DEBUG, "obstacle_detection.cbit") << "Displaying Key X: " << key_x; + //CLOG(DEBUG, "obstacle_detection.cbit") << "Displaying Key Y: " << key_y; + //CLOG(DEBUG, "obstacle_detection.cbit") << "Displaying World Key X: " << world_key_x; + //CLOG(DEBUG, "obstacle_detection.cbit") << "Displaying World Key Y: " << world_key_y; + + + keys.push_back(kv.first); + vals.push_back(kv.second); + } + CLOG(ERROR, "obstacle_detection.cbit") << "The size of each map is" << sparse_obs_map.size(); + CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Keys: " << keys; + CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Values: " << vals; + + //CLOG(WARNING, "obstacle_detection.cbit") << "T_c_w " << T_c_w; + //CLOG(WARNING, "obstacle_detection.cbit") << "T_m_s: " << T_m_s; + //CLOG(WARNING, "obstacle_detection.cbit") << "T_s_r_inv: " << T_s_r.inverse(); + + + + // if the costmap_history is smaller then some minimum value, just tack it on the end + if (costmap_history.size() < 10.0) + { + costmap_history.push_back(sparse_world_map); + } + // After that point, we then do a sliding window using shift operations, moving out the oldest map and appending the newest one + else + { + for (int i = 0; i < 9; i++) + { + costmap_history[i] = costmap_history[i + 1]; + } + costmap_history[9] = sparse_world_map; + + + //CLOG(WARNING, "obstacle_detection.cbit") << "costmap_history size " <, float> merged_world_map = costmap_history[9]; + //CLOG(WARNING, "obstacle_detection.cbit") << "merged world map size " <, float> filtered_world_map; + for (int i = 0; i < (costmap_history.size()-1); i++) + { + merged_world_map.merge(costmap_history[i]); + //CLOG(WARNING, "obstacle_detection.cbit") << "merged world map size " <> world_keys; + //world_keys.reserve(merged_world_map.size()); + //std::vector world_vals; + //world_vals.reserve(merged_world_map.size()); + for(auto kv : merged_world_map) + { + 
//world_keys.push_back(kv.first); + //world_vals.push_back(kv.second); + + int key_vote_counter = 0; + for (int i = 0; i < costmap_history.size(); i++) + if ((costmap_history[i]).find(kv.first) != (costmap_history[i]).end()) + { + key_vote_counter = key_vote_counter + 1; + } + + // Store the filtered (both temporal and transient) points in a sparse filtered unordered_map + if (key_vote_counter >= 3) + { + filtered_world_map.insert(std::make_pair(kv.first,kv.second)); + } + } + CLOG(WARNING, "obstacle_detection.cbit") << "filtered world map size " <, float> filtered_loc_map; + std::vector> filtered_keys; + filtered_keys.reserve(filtered_world_map.size()); + std::vector filtered_vals; + filtered_vals.reserve(filtered_world_map.size()); + for(auto kv : filtered_world_map) + { + float key_x = kv.first.first; + float key_y = kv.first.second; + // Convert the centre of each obstacle key value pair into the world frame + Eigen::Matrix grid_pt({key_x+(config_->resolution/2), key_y+(config_->resolution/2), 0.0, 1}); + auto collision_pt = T_c_w * grid_pt; + + //CLOG(DEBUG, "obstacle_detection.cbit") << "Displaying Transform: " << (sparse_costmap->T_vertex_this().inverse()); + + float loc_key_x = floor(collision_pt[0] / config_->resolution) * config_->resolution; + float loc_key_y = floor(collision_pt[1] / config_->resolution) * config_->resolution; + float loc_value = kv.second; + std::pair loc_keys(loc_key_x, loc_key_y); + filtered_loc_map.insert(std::make_pair(loc_keys,loc_value)); + } + CLOG(ERROR, "obstacle_detection.cbit") << "The size of the filtered map is" << filtered_loc_map.size(); + + + // Build the dense map, publish and save the results so the planner can access it + //const auto dense_costmap = std::make_shared(config_->resolution, config_->size_x, config_->size_y); // need to delcare this earlier + dense_costmap->update(filtered_loc_map); + // add transform to the localization vertex + //dense_costmap->T_vertex_this() = tactic::EdgeTransform(true); + 
//dense_costmap->vertex_id() = vid_loc; + //dense_costmap->vertex_sid() = sid_loc; + + // debug, temporarily using my filtered costmap as the main costmap + //auto costmap_msg = dense_costmap->toCostMapMsg(); + //costmap_msg.header.frame_id = "loc vertex frame"; + // costmap_msg.header.stamp = rclcpp::Time(*qdata.stamp); + //costmap_pub_->publish(costmap_msg); + + // publish the filtered occupancy grid + auto filtered_costmap_msg = dense_costmap->toCostMapMsg(); + filtered_costmap_msg.header.frame_id = "loc vertex frame"; + // costmap_msg.header.stamp = rclcpp::Time(*qdata.stamp); + filtered_costmap_pub_->publish(filtered_costmap_msg); + + + // Debug check that the filtered maps look okay + vtr::lidar::BaseCostMap::XY2ValueMap dense_map = dense_costmap->filter(0.01); + std::vector> keys2; + keys2.reserve(dense_map.size()); + std::vector vals2; + vals2.reserve(dense_map.size()); + for(auto kv : dense_map) { + keys2.push_back(kv.first); + vals2.push_back(kv.second); + } + CLOG(ERROR, "obstacle_detection.cbit") << "The size of the dense map is" << dense_map.size(); + CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Keys: " << keys2; + CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Values: " << vals2; + + } + + + + + + // End of Jordy's temporal filter changes + + + + + /// publish the transformed pointcloud if (config_->visualize) { // publish the aligned points @@ -332,7 +536,17 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, /// output auto change_detection_costmap_ref = output.change_detection_costmap.locked(); auto &change_detection_costmap = change_detection_costmap_ref.get(); - change_detection_costmap = costmap; + //change_detection_costmap = costmap; + + //Jordy debug, using experimental new costmaps instead + if (costmap_history.size() < 10.0) + { + change_detection_costmap = costmap; + } + else + { + change_detection_costmap = dense_costmap; + } CLOG(INFO, "lidar.change_detection") << "Change detection for 
lidar scan at stamp: " << stamp << " - DONE"; diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index c35aa9e64..c045ef6f0 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -178,6 +178,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // Store the transform T_c_w (from costmap to world) costmap_ptr->T_c_w = T_start_vertex.inverse(); // note that T_start_vertex is T_w_c if we want to bring keypoints to the world frame // Store the grid resoltuion + CLOG(DEBUG, "obstacle_detection.cbit") << "The costmap to world transform is: " << T_start_vertex.inverse(); costmap_ptr->grid_resolution = change_detection_costmap->dl(); // Experimental: Storing sequences of costmaps for temporal filtering purposes @@ -190,11 +191,12 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // After that point, we then do a sliding window using shift operations, moving out the oldest map and appending the newest one else { - for (int i = 0; i < (config_->costmap_history-1); i++) - { - costmap_ptr->obs_map_vect[i] = costmap_ptr->obs_map_vect[i + 1]; - costmap_ptr->T_c_w_vect[i] = costmap_ptr->T_c_w_vect[i + 1]; - } + // Legacy code for when I was filtering inside cbit (made a quick and dirty work around temporarily) + //for (int i = 0; i < (config_->costmap_history-1); i++) + //{ + // costmap_ptr->obs_map_vect[i] = costmap_ptr->obs_map_vect[i + 1]; + // costmap_ptr->T_c_w_vect[i] = costmap_ptr->T_c_w_vect[i + 1]; + //} costmap_ptr->obs_map_vect[config_->costmap_history-1] = obs_map; costmap_ptr->T_c_w_vect[config_->costmap_history-1] = costmap_ptr->T_c_w ; } diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index 2a3274b11..070db66f9 100644 --- 
a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -118,6 +118,7 @@ class CBITPlanner { Pose lin_interpolate(int p_ind, double p_val); bool is_inside_obs(std::vector> obs, Node node); bool costmap_col(Node node); + bool costmap_col_tight(Node node); bool discrete_collision(std::vector> obs, double discretization, Node start, Node end); bool col_check_path(); std::shared_ptr col_check_path_v2(); diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index 8b862a2e2..f9442bab2 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -605,7 +605,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo else { // Reset the free space flag in sample freespace - CLOG(WARNING, "path_planning.cbit_planner") << "No Collision:"; + //CLOG(WARNING, "path_planning.cbit_planner") << "No Collision:"; repair_mode = false; } @@ -676,8 +676,8 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo tree.QV.push_back(tree.V[i]); } // Otherwise, only add the portions of the tree within the sliding window to avoid processing preseeded vertices which are already optimal - else if (((tree.V[i]->p) <= p_goal->p + dynamic_window_width + (conf.sliding_window_freespace_padding*2)) && ((vertex_rej_prob / random_integer) >= 1.0)) - //else if (((vertex_rej_prob / random_integer) >= 1.0)) // for some reason using the lookahead queue doesnt work reliably for collisions, not sure why, need to investigate + //else if (((tree.V[i]->p) <= p_goal->p + dynamic_window_width + (conf.sliding_window_freespace_padding*2)) && ((vertex_rej_prob / random_integer) >= 1.0)) + else if (((vertex_rej_prob / random_integer) >= 1.0)) // for some reason using the lookahead queue doesnt work 
reliably for collisions, not sure why, need to investigate { tree.QV.push_back(tree.V[i]); } @@ -1739,7 +1739,7 @@ std::shared_ptr CBITPlanner::col_check_path_v2() Node vertex = *curv_path[i]; Node euclid_pt = curve_to_euclid(vertex); //if (is_inside_obs(obs_rectangle, euclid_pt)) // Legacy collision checking - if (costmap_col(euclid_pt)) + if (costmap_col_tight(euclid_pt)) { col_free_vertex = curv_path[i+1]; // take the vertex just before the collision vertex } @@ -1957,9 +1957,8 @@ bool CBITPlanner::is_inside_obs(std::vector> obs, Node node) return false; } - // DEBUG: Experimental costmap collision checking -bool CBITPlanner::costmap_col(Node node) +bool CBITPlanner::costmap_col_tight(Node node) { // DEBUG: Make a spoofed obstacle to add to the costmap to make sure collision detection works @@ -1996,6 +1995,7 @@ bool CBITPlanner::costmap_col(Node node) float x_key = floor(collision_pt[0] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; float y_key = floor(collision_pt[1] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; + //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; //CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; @@ -2005,28 +2005,187 @@ bool CBITPlanner::costmap_col(Node node) // Note may just make more sense to bring in the returns to this try/catch try { // Block of code to try - //grid_value = cbit_costmap_ptr->obs_map.at(std::pair (x_key, y_key)); grid_value = cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key, y_key)); - //CLOG(ERROR, "path_planning.cbit_planner") << "Key Value: " << grid_value; } catch (std::out_of_range) { // Block of code to handle errors grid_value = 0.0; - //CLOG(ERROR, "path_planning.cbit_planner") << "NO COLLISION!!!"; + //CLOG(ERROR, "path_planning.cbit_planner") << "Something went wrong in collision check!!!"; } if (grid_value > 0.0) { - vote_counter = vote_counter + 1; - //return true; + //vote_counter = vote_counter + 1; //debug, 
switching to external filter + return true; } - if (vote_counter >= 3)// Magic number for now - { - return true; + //debug, switching to external filter + //if (vote_counter >= 3)// Magic number for now + //{ + // return true; + //} + + } + return false; +} + + +// DEBUG: Experimental costmap collision checking +bool CBITPlanner::costmap_col(Node node) +{ + + // DEBUG: Make a spoofed obstacle to add to the costmap to make sure collision detection works + //std::pair fake_key(4.0, 0.0); + //float fake_value = 1.0; + //cbit_costmap_ptr->obs_map.insert(std::make_pair(fake_key,fake_value)); + + //CLOG(DEBUG, "path_planning.cbit_planner") << "Original Node: x: " << node.p << " y: " << node.q << " z: " << node.z; + + Eigen::Matrix test_pt({node.p, node.q, node.z, 1}); + + + + // Experimental, temporal filter (iterate through a sliding window of collision maps) + // For the moment what it does is collision check a history of 5 maps, and if any of those maps result in a collision, return collision + // Should make fluttery obstacles stay in place for about a second now, but the downside is false positives will also hang around, so may need to deal with this still + // Maybe by taking a vote? 
idk lets see how fast this is first + + //bool collision_result = false; + int vote_counter = 0; + for (int i = 0; i < cbit_costmap_ptr->T_c_w_vect.size(); i++) + { + + //auto collision_pt = cbit_costmap_ptr->T_c_w * test_pt; + auto collision_pt = cbit_costmap_ptr->T_c_w_vect[i] * test_pt; + + //CLOG(DEBUG, "path_planning.cbit_planner") << "Displaying the point in the costmap frame we are trying to collision check: " << collision_pt; + //CLOG(DEBUG, "path_planning.cbit_planner") << "X: " << collision_pt[0]; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Y: " << collision_pt[1]; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Resolution: " << cbit_costmap_ptr->grid_resolution; + + + // Round the collision point x and y values down to the nearest grid resolution so that it can be found in the obstacle unordered_map + float x_key = floor(collision_pt[0] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; + float y_key = floor(collision_pt[1] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; + + // EXPERIMENTAL + // want to explore also collision checking a small region around the actual x,y key too during active collision checking, this will prevent excessive rewiring + // It does increase the number of collision checks by a factor of 9 though, so might want to consider other options this was just the quick and dirty top of my head way + std::vector x_key_arr; + std::vector y_key_arr; + x_key_arr.clear(); + y_key_arr.clear(); + // just going to hard code for now because im tired and its getting late, dont leave this + x_key_arr.push_back(x_key); + y_key_arr.push_back(y_key); + /* + x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution); + y_key_arr.push_back(y_key); + + x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution); + y_key_arr.push_back(y_key); + + x_key_arr.push_back(x_key); + y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution); + + x_key_arr.push_back(x_key); + 
y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution); + + + x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution); + y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution); + + x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution); + y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution); + + x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution); + y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution); + + x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution); + y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution); + */ + + // second layer + /* + x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution*2.0); + y_key_arr.push_back(y_key); + + x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution*2.0); + y_key_arr.push_back(y_key); + + x_key_arr.push_back(x_key); + y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution*2.0); + + x_key_arr.push_back(x_key); + y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution*2.0); + + x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution*2.0); + y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution*2.0); + + x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution*2.0); + y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution*2.0); + + x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution*2.0); + y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution*2.0); + + x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution*2.0); + y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution*2.0); + */ + + + + //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; + + float grid_value; + + // Check to see if the point is in the obstacle map + // Note may just make more sense to bring in the returns to this try/catch + try { + // Block of code to try + //grid_value = 
cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key, y_key)); + + //CLOG(ERROR, "path_planning.cbit_planner") << "Key Value: " << grid_value; + + + // conservative buffer region: + for (int j=0; j < x_key_arr.size(); j++) + { + + // for some reason this commented out bit isnt working and we have to rely on the try catch loop, which I dont understand why, need to do some debugging still + //CLOG(ERROR, "path_planning.cbit_planner") << " X_key: " << x_key_arr[j] << " Y key: " << y_key_arr[j]; + //if ((cbit_costmap_ptr->obs_map_vect[i]).find(std::pair (x_key_arr[j], y_key_arr[j])) != (cbit_costmap_ptr->obs_map_vect[i]).end()) + //{ + //CLOG(ERROR, "path_planning.cbit_planner") << " Entered Loop "; + grid_value = cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key_arr[j], y_key_arr[j])); + if (grid_value > 0.0) + { + //vote_counter = vote_counter + 1; //debug, switching to external filter + //CLOG(ERROR, "path_planning.cbit_planner") << "COLLISION DETECTED"; + return true; + } + //} + } + } + catch (std::out_of_range) { + // Block of code to handle errors + grid_value = 0.0; + CLOG(ERROR, "path_planning.cbit_planner") << "Something went wrong in collision check!!!"; } + //if (grid_value > 0.0) + //{ + //vote_counter = vote_counter + 1; //debug, switching to external filter + // return true; + //} + + //debug, switching to external filter + //if (vote_counter >= 3)// Magic number for now + //{ + // return true; + //} + } return false; } diff --git a/rviz/honeycomb.rviz b/rviz/honeycomb.rviz index c75d458bd..150265988 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -20,7 +20,7 @@ Panels: - /Path1 - /Path2 Splitter Ratio: 0.4507042169570923 - Tree Height: 898 + Tree Height: 871 - Class: rviz_common/Selection Name: Selection - Class: rviz_common/Tool Properties @@ -101,22 +101,8 @@ Visualization Manager: Show Names: true Tree: world: - loc vertex frame: - {} - odo vertex frame: - robot: - lidar: - {} - planning frame: - robot planning: - {} - robot 
planning (extrapolated): - {} - robot planning (extrapolated) mpc: - {} world (offset): - loc vertex frame (offset): - {} + {} Update Interval: 0 Value: true - Alpha: 1 @@ -992,27 +978,6 @@ Visualization Manager: Value: /vtr/safe_corridor_costmap_updates Use Timestamp: false Value: false - - Alpha: 0.699999988079071 - Class: rviz_default_plugins/Map - Color Scheme: costmap - Draw Behind: false - Enabled: true - Name: obstacle cost map - Topic: - Depth: 5 - Durability Policy: Volatile - Filter size: 10 - History Policy: Keep Last - Reliability Policy: Reliable - Value: /vtr/change_detection_costmap - Update Topic: - Depth: 5 - Durability Policy: Volatile - History Policy: Keep Last - Reliability Policy: Reliable - Value: /vtr/change_detection_costmap_updates - Use Timestamp: false - Value: true - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: @@ -1205,6 +1170,48 @@ Visualization Manager: Reliability Policy: Reliable Value: /vtr/robot_path Value: true + - Alpha: 0.699999988079071 + Class: rviz_default_plugins/Map + Color Scheme: costmap + Draw Behind: false + Enabled: true + Name: obstacle cost map + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/change_detection_costmap + Update Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/change_detection_costmap_updates + Use Timestamp: false + Value: true + - Alpha: 0.699999988079071 + Class: rviz_default_plugins/Map + Color Scheme: raw + Draw Behind: false + Enabled: true + Name: filtered_obstacle_map + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/filtered_change_detection_costmap + Update Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/filtered_change_detection_costmap_updates 
+ Use Timestamp: false + Value: true Enabled: true Global Options: Background Color: 181; 181; 181 @@ -1251,7 +1258,7 @@ Visualization Manager: Views: Current: Class: rviz_default_plugins/Orbit - Distance: 13.163816452026367 + Distance: 18.444183349609375 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 @@ -1269,7 +1276,7 @@ Visualization Manager: Pitch: 1.5697963237762451 Target Frame: robot Value: Orbit (rviz_default_plugins) - Yaw: 3.145707845687866 + Yaw: 4.775712966918945 Saved: - Class: rviz_default_plugins/Orbit Distance: 111.39698028564453 @@ -1474,10 +1481,10 @@ Visualization Manager: Window Geometry: Displays: collapsed: false - Height: 1043 + Height: 1016 Hide Left Dock: false Hide Right Dock: true - QMainWindow State: 000000ff00000000fd000000040000000000000281000003bdfc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000003bd000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c00000030ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000030f000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003
000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000004f9000003bd00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c00000030ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000030f000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000004f9000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -1487,5 +1494,5 @@ Window Geometry: Views: collapsed: true Width: 1920 - X: 1920 - Y: 0 + X: 2560 + Y: 27 From f63691783c5a5fbf3ae06c4f4236374117e1deca Mon 
Sep 17 00:00:00 2001 From: Jordy Sehn Date: Fri, 20 Jan 2023 17:19:49 -0500 Subject: [PATCH 07/25] Newest stable version, fixed many bugs and improved performance substantially with latest controller --- config/honeycomb_grizzly_default.yaml | 63 ++- launch/online_honeycomb_grizzly.launch.yaml | 1 + .../vtr-gui/src/components/graph/GraphMap.js | 2 +- .../planning/change_detection_module_v3.hpp | 1 + .../include/vtr_lidar/path_planning/mpc.hpp | 5 +- main/src/vtr_lidar/src/data_types/costmap.cpp | 31 +- .../planning/change_detection_module_v3.cpp | 60 ++- main/src/vtr_lidar/src/path_planning/cbit.cpp | 45 +- main/src/vtr_lidar/src/path_planning/mpc.cpp | 328 ++++++------ .../include/vtr_path_planning/cbit/cbit.hpp | 5 + .../cbit/cbit_path_planner.hpp | 18 +- .../vtr_path_planning/cbit/generate_pq.hpp | 21 + .../mpc/mpc_path_planner.hpp | 5 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 83 +++- .../src/cbit/cbit_path_planner.cpp | 465 +++++++++++------- .../src/cbit/generate_pq.cpp | 45 +- main/src/vtr_path_planning/src/cbit/utils.cpp | 1 + .../src/mpc/mpc_path_planner.cpp | 330 +++++++------ rviz/honeycomb.rviz | 104 +++- 19 files changed, 1045 insertions(+), 568 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 353bc7c84..5dc3b1855 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -17,14 +17,16 @@ # "tactic.module.live_mem_manager", # "tactic.module.graph_mem_manager", # path planner - #"path_planning", + "path_planning", #"path_planning.teb", - #"path_planning.cbit", - #"path_planning.cbit_planner", - # "mpc.cbit", - # "mpc_debug.cbit", + "path_planning.cbit", + "path_planning.cbit_planner", + #"mpc.cbit", + #"mpc_debug.cbit", + #"obstacle_detection.cbit", # mission planner "mission.server", + #"mpc_debug", #"mission.state_machine", # pose graph # "pose_graph", @@ -54,9 +56,9 @@ lidar_topic: /points ############ map projection configuration ############ - 
graph_map: - origin_lat: 43.78220 - origin_lng: -79.4661 + graph_projection: + origin_lat: 43.78220 # PETAWAWA: 45.8983, # UTIAS: 43.78220 + origin_lng: -79.4661 # PETAWA: -77.2829, # UTIAS: -79.4661 origin_theta: 1.3 scale: 1.0 @@ -133,7 +135,7 @@ crop_range: 40.0 - frame_voxel_size: 0.2 # grid subsampling voxel size + frame_voxel_size: 0.1 # grid subsampling voxel size vertical_angle_res: 0.0132645 # vertical angle resolution in radius, equal to 0.76 degree documented in the manual polar_r_scale: 2.0 # polar_r_scale x vertical_angle_res = nearest neighbor search radius for normal computation @@ -157,7 +159,7 @@ type: lidar.odometry_icp # continuous time estimation - use_trajectory_estimation: true + use_trajectory_estimation: false traj_num_extra_states: 0 traj_lock_prev_pose: false traj_lock_prev_vel: false @@ -185,7 +187,7 @@ mapping: type: lidar.odometry_map_maintenance_v2 - map_voxel_size: 0.2 + map_voxel_size: 0.1 crop_range_front: 40.0 back_over_front_ratio: 0.5 @@ -205,7 +207,7 @@ depth: 6.0 - map_voxel_size: 0.2 + map_voxel_size: 0.1 crop_range_front: 40.0 back_over_front_ratio: 0.5 @@ -219,7 +221,7 @@ horizontal_resolution: 0.041 # 0.02042 vertical_resolution: 0.026 # 0.01326 - max_num_observations: 2000 + max_num_observations: 10000 min_num_observations: 4 dynamic_threshold: 0.3 @@ -228,7 +230,7 @@ inter_exp_merging: type: "lidar.inter_exp_merging_v2" - map_voxel_size: 0.2 + map_voxel_size: 0.1 max_num_experiences: 128 distance_threshold: 0.6 @@ -294,7 +296,7 @@ detection_range: 8.0 search_radius: 0.25 - negprob_threshold: 0.1 # was 0.015 # -1.86 without prior, 0.015 with prior # Jordy: I found I needed to bump this up abit (approx 0.075+) to reduce false positives + negprob_threshold: 0.05 # was 0.015 # -1.86 without prior, 0.015 with prior # Jordy: I found I needed to bump this up abit (approx 0.075+) to reduce false positives use_prior: true alpha0: 3.0 # 6 / 2 (6 pseudo observations) beta0: 0.03 # 0.01 * 6 / 2 (6 pseudo observations of variance 
0.01) @@ -304,10 +306,11 @@ support_variance: 0.05 support_threshold: 2.5 - influence_distance: 0.55 # was 0.5 Jordy - minimum_distance: 0.35 # was 0.3 Jordy + influence_distance: 0.50 # was 0.5 Jordy # Note that the total distancr=e where grid cells have values > 0 is min dist + influence dist, not influence dist! + minimum_distance: 0.9 # was 0.3 Jordy # cost map + costmap_history_size: 15 # Keep between 3 and 20, used for temporal filtering resolution: 0.25 size_x: 16.0 size_y: 8.0 @@ -405,8 +408,8 @@ ############ path planning configuration ############ path_planning: - type: cbit # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version - control_period: 33 # ms + type: cbit.lidar # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version + control_period: 50 # ms teb: # vtr specific visualize: true @@ -432,7 +435,7 @@ # Environment obs_padding: 0.0 curv_to_euclid_discretization: 20 - sliding_window_width: 10.0 + sliding_window_width: 12.0 sliding_window_freespace_padding: 0.5 corridor_resolution: 0.05 state_update_freq: 1.0 @@ -441,7 +444,7 @@ # Planner Tuning Params initial_samples: 350 - batch_samples: 100 + batch_samples: 200 pre_seed_resolution: 0.5 alpha: 0.5 q_max: 2.5 @@ -458,22 +461,28 @@ costmap: costmap_filter_value: 0.01 - costmap_history: 15 + costmap_history: 20 # Note I dont think im actually using this one anymore, am using the one in the change detection v3 now mpc: # Controller Params - horizon_steps: 40 - horizon_step_size: 0.1 - forward_vel: 1.0 + horizon_steps: 20 + horizon_step_size: 0.3 + forward_vel: 0.75 max_lin_vel: 1.25 max_ang_vel: 0.75 vehicle_model: "unicycle" # Cost Function Weights pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 - vel_error_cov: [0.1, 1.0] # was [0.1 2.0] - acc_error_cov: [0.1, 0.75] + vel_error_cov: [1.0, 
10.0] # was [0.1 2.0] # No longer using this cost term + acc_error_cov: [2.0, 2.0] kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] + #backup + #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 + #vel_error_cov: [0.1, 1.0] # was [0.1 2.0] + #acc_error_cov: [0.1, 0.75] + #kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] + # Misc command_history_length: 100 diff --git a/launch/online_honeycomb_grizzly.launch.yaml b/launch/online_honeycomb_grizzly.launch.yaml index fcf703092..24c9a1e3b 100644 --- a/launch/online_honeycomb_grizzly.launch.yaml +++ b/launch/online_honeycomb_grizzly.launch.yaml @@ -6,6 +6,7 @@ environment: # ROS_DOMAIN_ID: "1" # set this to a unique number when multiple ROS2 dependent system running on the same network # data_dir:=${VTRDATA}/online-test-lidar/$(date '+%F')/$(date '+%F') # data_dir:=${VTRDATA}/online-test-lidar/2022-09-08/2022-09-08 + # data_dir:=${VTRDATA}/online-test-lidar/2023-01-18/2023-01-18 # Nice dome path start_directory: ${VTRDATA} diff --git a/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js b/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js index c4657a51d..618c05978 100644 --- a/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js +++ b/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js @@ -214,7 +214,7 @@ class GraphMap extends React.Component { <> {/* Leaflet map container with initial center set to UTIAS (only for initialization) */} TODO We should make this set dynamically from the yaml config*/ zoom={18} zoomControl={false} whenCreated={this.mapCreatedCallback.bind(this)} diff --git a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v3.hpp b/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v3.hpp index 0da27e9ea..ca22a8eff 100644 --- 
a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v3.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v3.hpp @@ -58,6 +58,7 @@ class ChangeDetectionModuleV3 : public tactic::BaseModule { float support_threshold = 0.0; // cost map + int costmap_history_size = 10; float resolution = 1.0; float size_x = 20.0; float size_y = 20.0; diff --git a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc.hpp b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc.hpp index 4ec60ce95..b6faa57cf 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc.hpp @@ -43,4 +43,7 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 struct meas_result GenerateReferenceMeas(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); // Helper function for post-processing and saturating the velocity command -Eigen::Matrix SaturateVel(Eigen::Matrix applied_vel, double v_lim, double w_lim); \ No newline at end of file +Eigen::Matrix SaturateVel(Eigen::Matrix applied_vel, double v_lim, double w_lim); + +// Helper function in Generate Reference Meas which interpolates a Transformation measurement gen the cbit_path and the desired measurements p value along the path +lgmath::se3::Transformation InterpolateMeas(double p_meas, std::vector cbit_p, std::vector cbit_path); \ No newline at end of file diff --git a/main/src/vtr_lidar/src/data_types/costmap.cpp b/main/src/vtr_lidar/src/data_types/costmap.cpp index e37cc3625..1cec432a1 100644 --- a/main/src/vtr_lidar/src/data_types/costmap.cpp +++ b/main/src/vtr_lidar/src/data_types/costmap.cpp @@ -136,16 +136,35 @@ void DenseCostMap::update( // Modification by Jordy for updating dense maps from unordered maps with pairs of points () void DenseCostMap::update( - const std::unordered_map, float> values) { + const std::unordered_map, float> values) { + for (const auto val 
: values) { - const auto shifted_k = costmap::PixKey(val.first.first / dl_, val.first.second / dl_) - origin_; + auto shifted_k = costmap::PixKey(val.first.first / dl_, val.first.second / dl_) - origin_; + //const auto shifted_k_x = val.first.first;// - origin_.x; //const auto shifted_k_y = val.first.second;// - origin_.y; - //CLOG(INFO, "obstacle_detection.cbit") << "shifted_k_x" << shifted_k_x; - //CLOG(INFO, "obstacle_detection.cbit") << "shifted_k_y" << shifted_k_y; - //CLOG(INFO, "obstacle_detection.cbit") << "origin_x" << origin_.x; - //CLOG(INFO, "obstacle_detection.cbit") << "origin_y" << origin_.y; + + + // Handling an error where after transformations the shifted key could fall outside the costmap area resulting in an eigen indexing error + if (shifted_k.x >= size_x_ / dl_) + { + shifted_k.x = size_x_ / dl_; + } + if (shifted_k.x <= -1) + { + shifted_k.x = 0; + } + if (shifted_k.y >= size_y_ / dl_) + { + shifted_k.y = size_y_ / dl_; + } + if (shifted_k.y <= -1) + { + shifted_k.y = 0; + } + values_(shifted_k.x, shifted_k.y) = val.second; + } } diff --git a/main/src/vtr_lidar/src/modules/planning/change_detection_module_v3.cpp b/main/src/vtr_lidar/src/modules/planning/change_detection_module_v3.cpp index bd0b483c6..faa480b4c 100644 --- a/main/src/vtr_lidar/src/modules/planning/change_detection_module_v3.cpp +++ b/main/src/vtr_lidar/src/modules/planning/change_detection_module_v3.cpp @@ -112,6 +112,7 @@ auto ChangeDetectionModuleV3::Config::fromROS( config->support_variance = node->declare_parameter(param_prefix + ".support_variance", config->support_variance); config->support_threshold = node->declare_parameter(param_prefix + ".support_threshold", config->support_threshold); // cost map + config->costmap_history_size = node->declare_parameter(param_prefix + ".costmap_history_size", config->costmap_history_size); config->resolution = node->declare_parameter(param_prefix + ".resolution", config->resolution); config->size_x = 
node->declare_parameter(param_prefix + ".size_x", config->size_x); config->size_y = node->declare_parameter(param_prefix + ".size_y", config->size_y); @@ -315,7 +316,7 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, // Jordy Modifications for temporal costmap filtering (UNDER DEVELOPMENT) - /**/ + // declaration of the final costmap which we are outputting auto dense_costmap = std::make_shared(config_->resolution, config_->size_x, config_->size_y); dense_costmap->T_vertex_this() = tactic::EdgeTransform(true); @@ -334,7 +335,7 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, auto& chain = *output.chain; auto T_w_c = chain.pose(sid_loc); auto T_c_w = T_w_c.inverse(); - CLOG(WARNING, "obstacle_detection.cbit") << "T_w_c: " << T_w_c; // debug + //CLOG(WARNING, "obstacle_detection.cbit") << "T_w_c: " << T_w_c; // debug // Initialize an unordered map to store the sparse world obstacle representations std::unordered_map, float> sparse_world_map; @@ -359,6 +360,11 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, float world_key_x = floor(collision_pt[0] / config_->resolution) * config_->resolution; float world_key_y = floor(collision_pt[1] / config_->resolution) * config_->resolution; + + // Experimental debug! 
Im thinking that perhaps here these keys could be outside the legal area + + + float world_value = kv.second; std::pair world_keys(world_key_x, world_key_y); sparse_world_map.insert(std::make_pair(world_keys,world_value)); @@ -372,9 +378,9 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, keys.push_back(kv.first); vals.push_back(kv.second); } - CLOG(ERROR, "obstacle_detection.cbit") << "The size of each map is" << sparse_obs_map.size(); - CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Keys: " << keys; - CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Values: " << vals; + //CLOG(ERROR, "obstacle_detection.cbit") << "The size of each map is" << sparse_obs_map.size(); + //CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Keys: " << keys; + //CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Values: " << vals; //CLOG(WARNING, "obstacle_detection.cbit") << "T_c_w " << T_c_w; //CLOG(WARNING, "obstacle_detection.cbit") << "T_m_s: " << T_m_s; @@ -383,23 +389,24 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, // if the costmap_history is smaller then some minimum value, just tack it on the end - if (costmap_history.size() < 10.0) + // TODO need to get rid of these magic numbers + if (costmap_history.size() < config_->costmap_history_size) { costmap_history.push_back(sparse_world_map); } // After that point, we then do a sliding window using shift operations, moving out the oldest map and appending the newest one else { - for (int i = 0; i < 9; i++) + for (int i = 0; i < (config_->costmap_history_size-1); i++) { costmap_history[i] = costmap_history[i + 1]; } - costmap_history[9] = sparse_world_map; + costmap_history[(config_->costmap_history_size-1)] = sparse_world_map; //CLOG(WARNING, "obstacle_detection.cbit") << "costmap_history size " <, float> merged_world_map = costmap_history[9]; + std::unordered_map, float> merged_world_map = 
costmap_history[(config_->costmap_history_size-1)]; //CLOG(WARNING, "obstacle_detection.cbit") << "merged world map size " <, float> filtered_world_map; for (int i = 0; i < (costmap_history.size()-1); i++) @@ -407,8 +414,6 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, merged_world_map.merge(costmap_history[i]); //CLOG(WARNING, "obstacle_detection.cbit") << "merged world map size " <= 3) @@ -434,8 +441,6 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, filtered_world_map.insert(std::make_pair(kv.first,kv.second)); } } - CLOG(WARNING, "obstacle_detection.cbit") << "filtered world map size " < filtered_vals; filtered_vals.reserve(filtered_world_map.size()); + for(auto kv : filtered_world_map) { float key_x = kv.first.first; @@ -457,16 +463,22 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, float loc_key_x = floor(collision_pt[0] / config_->resolution) * config_->resolution; float loc_key_y = floor(collision_pt[1] / config_->resolution) * config_->resolution; + + + // debug to check loc_key is in a valid range: + + float loc_value = kv.second; std::pair loc_keys(loc_key_x, loc_key_y); filtered_loc_map.insert(std::make_pair(loc_keys,loc_value)); } - CLOG(ERROR, "obstacle_detection.cbit") << "The size of the filtered map is" << filtered_loc_map.size(); - - + // Build the dense map, publish and save the results so the planner can access it //const auto dense_costmap = std::make_shared(config_->resolution, config_->size_x, config_->size_y); // need to delcare this earlier + dense_costmap->update(filtered_loc_map); + + // add transform to the localization vertex //dense_costmap->T_vertex_this() = tactic::EdgeTransform(true); //dense_costmap->vertex_id() = vid_loc; @@ -478,13 +490,15 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, // costmap_msg.header.stamp = rclcpp::Time(*qdata.stamp); //costmap_pub_->publish(costmap_msg); - // publish the 
filtered occupancy grid + //CLOG(WARNING, "obstacle_detection.cbit") << "Successfully update the dense costmap"; + + + // publish the filtered occupancy grid auto filtered_costmap_msg = dense_costmap->toCostMapMsg(); filtered_costmap_msg.header.frame_id = "loc vertex frame"; // costmap_msg.header.stamp = rclcpp::Time(*qdata.stamp); filtered_costmap_pub_->publish(filtered_costmap_msg); - // Debug check that the filtered maps look okay vtr::lidar::BaseCostMap::XY2ValueMap dense_map = dense_costmap->filter(0.01); std::vector> keys2; @@ -495,16 +509,15 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, keys2.push_back(kv.first); vals2.push_back(kv.second); } - CLOG(ERROR, "obstacle_detection.cbit") << "The size of the dense map is" << dense_map.size(); - CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Keys: " << keys2; - CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Values: " << vals2; - + //CLOG(ERROR, "obstacle_detection.cbit") << "The size of the dense map is" << dense_map.size(); + //CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Keys: " << keys2; + //CLOG(ERROR, "obstacle_detection.cbit") << "Displaying all Values: " << vals2; } - + // End of Jordy's temporal filter changes @@ -550,6 +563,7 @@ void ChangeDetectionModuleV3::run_(QueryCache &qdata0, OutputCache &output0, CLOG(INFO, "lidar.change_detection") << "Change detection for lidar scan at stamp: " << stamp << " - DONE"; + } } // namespace lidar diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index c045ef6f0..424abf0ae 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -262,13 +262,13 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { kin_noise_vect = config_->kin_error_cov; - + // Extrapolating robot pose into the future by using the history of applied mpc velocity commands const auto curr_time = now(); // 
always in nanoseconds const auto dt = static_cast(curr_time - stamp) * 1e-9; - CLOG(INFO, "mpc_debug.cbit") << "History of the Robot Velocities" << vel_history; + CLOG(INFO, "mpc_debug.cbit") << "History of the Robot Velocities:" << vel_history; // Check the time past since the last state update was received // Go back through the vel_history to approximately dt seconds in the past @@ -277,18 +277,18 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { auto T_p_r2 = T_p_r; for (int i=std::floor(dt / control_period); i > 0; i--) { - //CLOG(DEBUG, "mpc_debug.cbit") << "The iteration Index i is: " << i; + CLOG(DEBUG, "mpc_debug.cbit") << "The iteration Index i is: " << i; w_p_r_in_r(0) = -1* vel_history[vel_history.size()-(i+1)][0]; w_p_r_in_r(1) = 0.0; w_p_r_in_r(2) = 0.0; w_p_r_in_r(3) = 0.0; w_p_r_in_r(4) = 0.0; w_p_r_in_r(5) = -1* vel_history[vel_history.size()-(i+1)][1]; - //CLOG(DEBUG, "mpc_debug.cbit") << "Robot velocity Used for Extrapolation: " << -w_p_r_in_r.transpose() << std::endl; + CLOG(DEBUG, "mpc_debug.cbit") << "Robot velocity Used for Extrapolation: " << -w_p_r_in_r.transpose() << std::endl; Eigen::Matrix xi_p_r_in_r(control_period * w_p_r_in_r); T_p_r2 = T_p_r2 * tactic::EdgeTransform(xi_p_r_in_r).inverse(); - //CLOG(DEBUG, "mpc_debug.cbit") << "Make sure the lie algebra is changing right:" << T_p_r2; + CLOG(DEBUG, "mpc_debug.cbit") << "Make sure the lie algebra is changing right:" << T_p_r2; } // Apply the final partial period velocity @@ -305,7 +305,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { const auto T_p_r_extp2 = T_p_r2; CLOG(DEBUG, "mpc_debug.cbit") << "New extrapolated pose:" << T_p_r_extp2; - + // Uncomment if we use the extrapolated robot pose for control (constant velocity model from odometry) //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp); @@ -315,6 +315,8 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // no extrapolation 
(comment this out if we are not using extrapolation) //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); + // TODO: Set whether to use mpc extrapolation as a config param (though there is almost never a good reason not to use it) + //Convert to x,y,z,roll, pitch, yaw std::tuple robot_pose = T2xyzrpy(T0); CLOG(DEBUG, "mpc_debug.cbit") << "The Current Robot Pose (from planning) is - x: " << std::get<0>(robot_pose) << " y: " << std::get<1>(robot_pose) << " yaw: " << std::get<5>(robot_pose); @@ -333,13 +335,22 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { bool point_stabilization = meas_result.point_stabilization; - // Create and solve the STEAM optimization problem - CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; - auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); - applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here - auto mpc_poses = mpc_result.mpc_poses; - CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; + std::vector mpc_poses; + try + { + CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; + auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); + applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here + mpc_poses = mpc_result.mpc_poses; + CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; + } + catch(...) 
+ { + CLOG(ERROR, "mpc.cbit") << "STEAM Optimization Failed; Commanding to Stop the Vehicle"; + applied_vel(0) = 0.0; + applied_vel(1) = 0.0; + } CLOG(INFO, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); @@ -353,7 +364,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { vel_history.push_back(saturated_vel); // Store the current robot state in the robot state path so it can be visualized - robot_poses.push_back(T0); + robot_poses.push_back(T_w_p * T_p_r); // Send the robot poses and mpc prediction to rviz visualize(stamp, T_w_p, T_p_r, T_p_r_extp, T_p_r_extp2, mpc_poses, robot_poses); @@ -370,13 +381,11 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { << command.angular.z << "]"; return command; - - } // If localized and an initial solution is found. - + } // Otherwise stop the robot else { - CLOG(INFO, "mpc.cbit") << "There is not a valid plan yet, returning 0.0 velocity command"; + CLOG(INFO, "mpc.cbit") << "There is not a valid plan yet, returning zero velocity commands"; applied_vel << 0.0, 0.0; vel_history.erase(vel_history.begin()); @@ -385,5 +394,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { } } + + } // namespace lidar } // namespace vtr \ No newline at end of file diff --git a/main/src/vtr_lidar/src/path_planning/mpc.cpp b/main/src/vtr_lidar/src/path_planning/mpc.cpp index 15edacfe3..a56174424 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc.cpp @@ -45,6 +45,15 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 0, 0, 0, -1; + // Lateral constraint projection matrices (Experimental) + Eigen::Matrix I_4; + I_4 << 0, + 0, + 0, + 1; + Eigen::Matrix I_2_tran; + I_2_tran << 0, 1, 0, 0; + // Setup shared loss functions and noise models for all cost terms const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); @@ -61,6 +70,12 @@ struct mpc_result 
SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 const auto sharedKinNoiseModel = steam::StaticNoiseModel<6>::MakeShared(kin_noise_vect); + // Experimental lat constraint (hardcoded for now) + Eigen::Matrix lat_noise_vect; + lat_noise_vect << 1.0; + const auto sharedLatNoiseModel = + steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); + // Generate STEAM States for the velocity vector and SE3 state transforms @@ -74,12 +89,13 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 // Pushback the initial states (current robot state) pose_states.push_back(T0_inv); // Change this to T0 when implementing on robot, T0_1 for debug //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() - vel_states.push_back(v0); + vel_states.push_back(v0); // Set the remaining states for (int i=0; i previous_vel, lgmath::se3 for (int i=0; i previous_vel, lgmath::se3 //const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, sharedLossFunc); //opt_problem.addCostTerm(vel_cost_term); - // Experimental velocity set-point constraint (instead of non zero velocity penalty) + + + + // Kinematic constraints (softened but penalized heavily) + if (i < (K-1)) + { + const auto lhs = steam::se3::ComposeInverseEvaluator::MakeShared(pose_state_vars[i+1], pose_state_vars[i]); + const auto vel_proj = steam::vspace::MatrixMultEvaluator<6,2>::MakeShared(vel_state_vars[i], P_tran); // TODO, I guess this version of steam doesnt have this one, will need to do it myself + const auto scaled_vel_proj = steam::vspace::ScalarMultEvaluator<6>::MakeShared(vel_proj, DT); + const auto rhs = steam::se3::ExpMapEvaluator::MakeShared(scaled_vel_proj); + const auto kin_error_func = steam::se3::LogMapEvaluator::MakeShared(steam::se3::ComposeInverseEvaluator::MakeShared(lhs, rhs)); + const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, 
sharedKinNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(kin_cost_term); + + + // Experimental velocity set-point constraint (instead of non zero velocity penalty) // Only add this cost term if we are not in point stabilization mode (end of path) if (point_stabilization == false) { @@ -127,6 +164,7 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 // Experimental acceleration limits + if (i == 0) { // On the first iteration, we need to use an error with the previously applied control command state @@ -139,27 +177,29 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 const auto accel_diff = steam::vspace::AdditionEvaluator<2>::MakeShared(vel_state_vars[i], steam::vspace::NegationEvaluator<2>::MakeShared(vel_state_vars[i-1])); const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, sharedLossFunc); opt_problem.addCostTerm(accel_cost_term); - } + } + + } - // Kinematic constraints (softened but penalized heavily) - if (i < (K-1)) - { - const auto lhs = steam::se3::ComposeInverseEvaluator::MakeShared(pose_state_vars[i+1], pose_state_vars[i]); - const auto vel_proj = steam::vspace::MatrixMultEvaluator<6,2>::MakeShared(vel_state_vars[i], P_tran); // TODO, I guess this version of steam doesnt have this one, will need to do it myself - const auto scaled_vel_proj = steam::vspace::ScalarMultEvaluator<6>::MakeShared(vel_proj, DT); - const auto rhs = steam::se3::ExpMapEvaluator::MakeShared(scaled_vel_proj); - const auto kin_error_func = steam::se3::LogMapEvaluator::MakeShared(steam::se3::ComposeInverseEvaluator::MakeShared(lhs, rhs)); - const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, sharedLossFunc); - opt_problem.addCostTerm(kin_cost_term); - } + // Experimental Hard Lateral Constraints + /* + CLOG(DEBUG, "debug") << "Attempting to add a lateral cost term"; + const auto inv_state = 
steam::se3::InverseEvaluator::MakeShared(pose_state_vars[i]); + const auto pose_err = steam::vspace::MatrixMultEvaluator<4,4>::MakeShared(inv_state, measurements[i]); // I think this line is the problem, we cannot really convert the se3 evaluators to vspace evaluators + const auto left_mul = steam::vspace::MatrixMultEvaluator<1,4>::MakeShared(pose_err, I_2_tran); + const auto right_mul = steam::vspace::MatrixMultEvaluator<4,1>::MakeShared(pose_err, I_4); + const auto lat_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(right_mul, sharedLatNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(lat_cost_term); + CLOG(DEBUG, "debug") << "Successfully added Lat cost term"; + */ } // Solve the optimization problem with GuassNewton solver using SolverType = steam::GaussNewtonSolver; // Initialize parameters (enable verbose mode) SolverType::Params params; - params.verbose = false; // Makes the output display for debug + params.verbose = false; // Makes the output display for debug when true SolverType solver(opt_problem, params); solver.optimize(); @@ -243,152 +283,139 @@ struct meas_result GenerateReferenceMeas(std::shared_ptr> cbit // Save a copy of the current path solution to work on auto cbit_path = *cbit_path_ptr; + // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) - // Take in the current euclidean path solution from the cbit planner in the world frame, the current robot state, and determine - // which measurements we wish to track to follow the path at the desired target velocity - CLOG(DEBUG, "mpc_debug.cbit") << "Reference Path Points:"; - std::vector measurements; - double starting_dist = INFINITY; + // PSEUDO CODE: + // 1. Find the closest point on the cbit path to the current state of the robot + // 2. Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) + // 3. 
After we have our starting closest point on the path, assign that point a p value of 0. Compute the p values for each subsequent point in the lookahead window + // 4. using the desired p values, and the known p values, interpolate a, x,y,z,yaw, value each measurement + // 5. Create the proper measurement transform for each measurement and get it ready for the using with the optimization problem + + // Limiting the size of the cbit path based on the sliding window and then assigning p values + double lookahead_dist = 0.0; + double p_dist = 0.0; + + double min_dist = INFINITY; double new_dist; double dx; double dy; - double delta_dist = 0; - double index_counter = 0; - - // Find closest point on the path to the current state (new version, experimental) - double min_dist = INFINITY; - CLOG(DEBUG, "mpc_debug.cbit") << "Total CBIT Path Size: " << cbit_path.size(); - for (int i = 0; i < 200; i++) // magic number for now, corresponds to about a 5m lookahead (need to remove dependency on this) - { - // handle end of path case: - if (i >= cbit_path.size()) - { - break; - } - CLOG(DEBUG, "mpc_debug.cbit") << "x: " << (cbit_path)[i].x << "y: " << (cbit_path)[i].y << "z: " << (cbit_path)[i].z; + double p_correction = 0.0; + + std::vector cbit_p; + cbit_p.reserve(cbit_path.size()); + cbit_p.push_back(0.0); + for (int i = 0; i < (cbit_path.size()-2); i++) // the last value of vector is size()-1, so second to last will be size-2 + { + // calculate the p value for the point + p_dist = sqrt((((cbit_path)[i].x - (cbit_path)[i+1].x) * ((cbit_path)[i].x - (cbit_path)[i+1].x)) + (((cbit_path)[i].y - (cbit_path)[i+1].y) * ((cbit_path)[i].y - (cbit_path)[i+1].y))); + lookahead_dist = lookahead_dist + p_dist; + cbit_p.push_back(lookahead_dist); + + // Keep track of the closest point to the robot state dx = (cbit_path)[i].x - std::get<0>(robot_pose); dy = (cbit_path)[i].y - std::get<1>(robot_pose); new_dist = sqrt((dx * dx) + (dy * dy)); - CLOG(DEBUG, "mpc_debug.cbit") << "Dist to Pt: " << 
new_dist; if (new_dist < min_dist) { CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; - index_counter = i; // I wonder if we should add some buffer, as this point could still be behind the current robot + p_correction = lookahead_dist; min_dist = new_dist; } - } - /* - // Find closest point on the path to the current state (Old version, mostly worked, but I think if the cbit path was not smooth there was a chance it would exit early before finding closest point) - while (delta_dist >= 0) - { - CLOG(DEBUG, "mpc_debug.cbit") << "x: " << (cbit_path)[index_counter].x << "y: " << (cbit_path)[index_counter].y << "z: " << (cbit_path)[index_counter].z; - dx = (cbit_path)[index_counter].x - std::get<0>(robot_pose); - dy = (cbit_path)[index_counter].y - std::get<1>(robot_pose); - new_dist = sqrt((dx * dx) + (dy * dy)); - delta_dist = starting_dist - new_dist; - CLOG(DEBUG, "mpc_debug.cbit") << "Dist to Pt: " << new_dist; - CLOG(DEBUG, "mpc_debug.cbit") << "Delta Dist: " << delta_dist; - if (delta_dist >= 0) - { - starting_dist = new_dist; - index_counter = index_counter + 1; - } - else + + // Stop once we get about 12m ahead of the robot (magic number for now, but this is just a conservative estimate of any reasonable lookahead window and mpc horizon) + if (lookahead_dist > 12.0) { - CLOG(DEBUG, "mpc_debug.cbit") << "Delta Dist Negative, Return i = " << index_counter-1; - index_counter = index_counter - 1; + break; } } - */ + //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; - // Keep iterating through the rest of the path, storing points in the path as measurements if they maintain an approximate - // forward path velocity of VF (//TODO, may need to also interpolate in some instances if we want to be very particular) - for (int i = index_counter; i < (cbit_path).size(); i++) + // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state + std::vector p_meas_vec; + 
std::vector measurements; + p_meas_vec.reserve(K); + for (int i = 0; i < K; i++) { - // The first iteration we need to add the closest point to the initial position as a measurement - // Subesequent iterations we want to select points on the path to track carefully based on the desired velocity we want to meet. - - // Reset the measurement distance - double delta_dist = 0.0; - if (index_counter != i) - { - // scan the path into the future until we proceed approximately VF*DT meters forward longitudinally long the path - // The resulting indice of the path will be the one we use for the next measurement - while ((delta_dist <= (VF*DT)) && (i < (cbit_path).size())) // Need to add this and condition to handle end of path case - { - - double prev_x = (cbit_path)[i-1].x; - double prev_y = (cbit_path)[i-1].y; - double next_x = (cbit_path)[i].x; - double next_y = (cbit_path)[i].y; - delta_dist = delta_dist + sqrt(((next_x-prev_x) * (next_x-prev_x)) + ((next_y-prev_y) * (next_y-prev_y))); - i = i + 1; - } - // With the above setup, pretty sure the resulting i will be 1 too far when we break the loop, so we need to decrement it once at the end - i = i-1; - } + p_meas_vec.push_back((i * DT * VF) + p_correction); + } + //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values + // Note this could be combined in the previous loop too + bool point_stabilization = false; + for (int i = 0; i < p_meas_vec.size(); i++) + { + // handle end of path case: + // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path + // This will cause the intepolation to return the final cbit_path pose for all measurements past this point + //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "debug") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, 
"debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) + { + p_meas_vec[i] = cbit_p[cbit_p.size()-1]; + point_stabilization = true; // Enable point stabilization configs + CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; + } + lgmath::se3::Transformation meas = InterpolateMeas(p_meas_vec[i], cbit_p, cbit_path); + // add to measurement vector + measurements.push_back(meas); + } - // Derive a yaw direction for each point based on the vector formed between the current point and the previous point on the path - double yaw = std::atan2(((cbit_path)[i].y - (cbit_path)[i-1].y), ((cbit_path)[i].x - (cbit_path)[i-1].x)); - //CLOG(DEBUG, "mpc_debug.cbit") << "The yaw of the path pt is: " << yaw; + return {measurements, point_stabilization}; - // Generate a reference Transformation matrix (ignores roll/pitch) - Eigen::Matrix4d T_ref; - T_ref << std::cos(yaw),-1*std::sin(yaw),0,(cbit_path)[i].x, - std::sin(yaw), std::cos(yaw),0,(cbit_path)[i].y, - 0, 0, 1,(cbit_path)[i].z, - 0, 0, 0, 1; - T_ref = T_ref.inverse().eval(); - measurements.push_back(lgmath::se3::Transformation(T_ref)); + //End of Experimental Changes - // Early break condition when the number of measurements we need is satisfied based on the horizon - if (measurements.size() == K) - { - break; - } - // TODO handle end of path case => will want to repeat the final measurements and turn problem into a point stabilization MPC. - - } +} - // If we reach the end of the path without generating enough measurements (approaching end of path), populate the measurements - // with the final point in the path (point stabilization problem) - if (measurements.size() < K) +// function takes in a the cbit path solution with a vector defining hte p axis of the path, and then a desired p_meas +// Then tries to output a euclidean pose interpolated for the desired p_meas. 
+lgmath::se3::Transformation InterpolateMeas(double p_val, std::vector cbit_p, std::vector cbit_path) +{ + // Find the lower bound of the p values + for (int i = 0; i < cbit_p.size(); i++) + { + if (cbit_p[i] < p_val) { - CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; - - // debug logging messages - /* - CLOG(ERROR, "mpc.cbit") << "Displaying all measurements so far:"; - CLOG(ERROR, "mpc.cbit") << "Size of cbit path is: " << cbit_path.size(); - CLOG(ERROR, "mpc.cbit") << "Size of measurements is: " << measurements.size(); - for (int k = 0; k < measurements.size(); k++) - { - CLOG(ERROR, "mpc.cbit") << "Measurement k: " << k << " is: " << measurements[k]; - } - - CLOG(ERROR, "mpc.cbit") << "The last valid transform was: " << measurements[measurements.size()-1]; - */ - for (int j = measurements.size(); j < K; j++) - { - measurements.push_back(measurements[j-1]); - //CLOG(ERROR, "mpc.cbit") << "Appending this transform to the measurements: " << measurements[j-1]; - } - - return {measurements, true}; + continue; } - else { - return {measurements, false}; - } + double p_lower = cbit_p[i-1]; + double p_upper = cbit_p[i]; + Pose pose_lower = cbit_path[i-1]; + Pose pose_upper = cbit_path[i]; + + double x_int = pose_lower.x + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .x - pose_lower.x); + double y_int = pose_lower.y + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .y - pose_lower.y); + double z_int = pose_lower.z + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .z - pose_lower.z); + + // For yaw we need to be abit careful about sign and angle wrap around + // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts + + double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); -} + // Build the transformation matrix + Eigen::Matrix4d T_ref; + T_ref << std::cos(yaw_int),-1*std::sin(yaw_int),0, x_int, + std::sin(yaw_int), 
std::cos(yaw_int),0, y_int, + 0, 0, 1, z_int, + 0, 0, 0, 1; + T_ref = T_ref.inverse().eval(); + + lgmath::se3::Transformation meas = lgmath::se3::Transformation(T_ref); + + CLOG(DEBUG, "mpc_debug.cbit") << "The measurement Euclidean state is - x: " << x_int << " y: " << y_int << " z: " << z_int << " yaw: " << yaw_int; + return meas; + } + } +} // Simple function for checking that the current output velocity command is saturated between our mechanical velocity limits @@ -410,32 +437,49 @@ Eigen::Matrix SaturateVel(Eigen::Matrix applied_vel, } */ - if (applied_vel(0) >= v_lim) + if (abs(applied_vel(0)) >= v_lim) { - command_lin_x = v_lim; - } - else if (applied_vel(0) <= 0.0) - { - command_lin_x = 0.0; + if (applied_vel(0) > 0.0) + { + command_lin_x = v_lim; + } + else if (applied_vel(0) < 0.0) + { + command_lin_x = -1.0* v_lim; + } } + // Removed for bi-directional control purposes + //else if (applied_vel(0) <= 0.0) + //{ + // command_lin_x = 0.0; + //} else { command_lin_x = applied_vel(0) ; } - if (applied_vel(1) >= w_lim) + if (abs(applied_vel(1)) >= w_lim) { - command_ang_z = w_lim; - } - else if (applied_vel(1) <= -1*w_lim) - { - command_ang_z = -1*w_lim; + if (applied_vel(1) > 0.0) + { + command_ang_z = w_lim; + } + else if (applied_vel(1) < 0.0) + { + command_ang_z = -1.0 * w_lim; + } } + //else if (applied_vel(1) <= -1*w_lim) + //{ + // command_ang_z = -1*w_lim; + //} else { command_ang_z = applied_vel(1) ; } + // Changes for Bi-directional path traversal, we no longer want to saturate commands less than 0.0 + saturated_vel << command_lin_x, command_ang_z; return saturated_vel; } diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp index d83ba549e..c4040d63e 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp @@ -162,11 +162,16 @@ class CBIT : public 
BasePathPlanner { rclcpp::Publisher::SharedPtr mpc_path_pub_; rclcpp::Publisher::SharedPtr robot_path_pub_; rclcpp::Publisher::SharedPtr path_pub_; + rclcpp::Publisher::SharedPtr corridor_pub_l_; + rclcpp::Publisher::SharedPtr corridor_pub_r_; // Pointers to the output path std::vector cbit_path; std::shared_ptr> cbit_path_ptr; + // Pointers to the corridor + std::shared_ptr corridor_ptr; + tactic::Timestamp prev_stamp; // Store the previously applied velocity and a sliding window history of MPC results diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index 070db66f9..ce34f6674 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -78,7 +78,7 @@ class CBITPlanner { // Costmap pointer std::shared_ptr cbit_costmap_ptr; - CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr); + CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr); protected: struct ChainInfo { @@ -94,9 +94,9 @@ class CBITPlanner { private: void InitializePlanningSpace(); - void Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr); + void Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr); void ResetPlanner(); - void HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr); + void HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr); 
std::shared_ptr UpdateState(); std::vector> SampleBox(int m); std::vector> SampleFreeSpace(int m); @@ -121,11 +121,21 @@ class CBITPlanner { bool costmap_col_tight(Node node); bool discrete_collision(std::vector> obs, double discretization, Node start, Node end); bool col_check_path(); - std::shared_ptr col_check_path_v2(); + std::shared_ptr col_check_path_v2(double max_lookahead_p); void restore_tree(double g_T_update, double g_T_weighted_update); // Add class for Tree // Add dictionary (or some other structure) for the cost to come lookup using NodeID as key // Constructor: Needs to initialize all my objects im using + + + // Temporary functions for corridor updates, long term want to move these to a different file + void update_corridor(std::shared_ptr corridor, std::vector homotopy_p, std::vector homotopy_q, Node robot_state); + struct collision_result + { + bool bool_result; + Node col_node; + }; + struct collision_result discrete_collision_v2(double discretization, Node start, Node end); }; \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp index 6f3f18943..e6a8cc1b9 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp @@ -48,4 +48,25 @@ class CBITPath { // Actually I think ill just do this in the constructor for now //std::vector ProcessPath(std::vector disc_path); // Function for assigning p distance values for each euclidean point in pre-processing +}; + +// Class for storing the dynamic corridor information +class CBITCorridor { + public: + std::vector p_bins; + std::vector q_left; + std::vector q_right; + std::vector x_left; + std::vector x_right; + std::vector y_left; + std::vector y_right; + double q_max; + double sliding_window_width; + double curv_to_euclid_discretization; + + CBITCorridor(CBITConfig 
config, std::shared_ptr global_path_ptr); // Constructor, Feed this the taught path and config + CBITCorridor() = default; + + //void update_corridor(homotopy_p, homotopy_q, robot_state); + }; \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner.hpp index 4ec60ce95..b6faa57cf 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner.hpp @@ -43,4 +43,7 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 struct meas_result GenerateReferenceMeas(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); // Helper function for post-processing and saturating the velocity command -Eigen::Matrix SaturateVel(Eigen::Matrix applied_vel, double v_lim, double w_lim); \ No newline at end of file +Eigen::Matrix SaturateVel(Eigen::Matrix applied_vel, double v_lim, double w_lim); + +// Helper function in Generate Reference Meas which interpolates a Transformation measurement gen the cbit_path and the desired measurements p value along the path +lgmath::se3::Transformation InterpolateMeas(double p_meas, std::vector cbit_p, std::vector cbit_path); \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 48c5ef63b..2a6fd0589 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -118,6 +118,8 @@ CBIT::CBIT(const Config::ConstPtr& config, mpc_path_pub_ = node->create_publisher("mpc_prediction", 10); robot_path_pub_ = node->create_publisher("robot_path", 10); path_pub_ = node->create_publisher("planning_path", 10); + corridor_pub_l_ = node->create_publisher("corridor_path_left", 10); + corridor_pub_r_ = node->create_publisher("corridor_path_right", 10); // 
Updating cbit_configs // Environment @@ -229,16 +231,20 @@ void CBIT::initializeRoute(RobotState& robot_state) { CLOG(INFO, "path_planning.cbit") << "Trying to create global path"; // Create the path class object (Path preprocessing) CBITPath global_path(cbit_config, euclid_path_vec); - // Make a pointer to this path std::shared_ptr global_path_ptr = std::make_shared(global_path); + CLOG(INFO, "path_planning.cbit") << "Teach Path has been pre-processed. Attempting to initialize the dynamic corridor"; - CLOG(INFO, "path_planning.cbit") << "Teach Path has been pre-processed. Attempting to instantiate the planner"; + // Initialize the dynamic corridor + CBITCorridor corridor(cbit_config, global_path_ptr); + // Make a pointer to the corridor + corridor_ptr = std::make_shared(corridor); + CLOG(INFO, "path_planning.cbit") << "Corridor generated successfully. Attempting to instantiate the planner"; - // Instantiate the planner - CBITPlanner cbit(cbit_config, global_path_ptr, robot_state, cbit_path_ptr, costmap_ptr); + // Instantiate the planner + CBITPlanner cbit(cbit_config, global_path_ptr, robot_state, cbit_path_ptr, costmap_ptr, corridor_ptr); CLOG(INFO, "path_planning.cbit") << "Planner successfully created and resolved"; } @@ -375,11 +381,21 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { // Create and solve the STEAM optimization problem - CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; - auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); - applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here - auto mpc_poses = mpc_result.mpc_poses; - CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; + std::vector mpc_poses; + try + { + CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; + auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, 
accel_noise_vect, kin_noise_vect, point_stabilization); + applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here + mpc_poses = mpc_result.mpc_poses; + CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; + } + catch(...) + { + CLOG(ERROR, "mpc.cbit") << "STEAM Optimization Failed; Commanding to Stop the Vehicle"; + applied_vel(0) = 0.0; + applied_vel(1) = 0.0; + } CLOG(INFO, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); @@ -393,7 +409,7 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { vel_history.push_back(saturated_vel); // Store the current robot state in the robot state path so it can be visualized - robot_poses.push_back(T0); + robot_poses.push_back(T_w_p * T_p_r); // Send the robot poses and mpc prediction to rviz visualize(stamp, T_w_p, T_p_r, T_p_r_extp, T_p_r_extp2, mpc_poses, robot_poses); @@ -515,7 +531,6 @@ void CBIT::visualize(const tactic::Timestamp& stamp, const tactic::EdgeTransform // Attempting to publish the actual path which we are receiving from the shared pointer in the cbitplanner // The path is stored as a vector of se3 Pose objects from cbit/utils, need to iterate through and construct proper ros2 nav_msgs PoseStamped - /// Publish the intermediate goals in the planning frame { nav_msgs::msg::Path path; path.header.frame_id = "world"; @@ -541,6 +556,52 @@ void CBIT::visualize(const tactic::Timestamp& stamp, const tactic::EdgeTransform path_pub_->publish(path); } + + + + // Attempting to Publish the left and right dynamic corridor for the current path homotopy class + { + nav_msgs::msg::Path corridor_left; + corridor_left.header.frame_id = "world"; + corridor_left.header.stamp = rclcpp::Time(stamp); + auto& poses_l = corridor_left.poses; + + nav_msgs::msg::Path corridor_right; + corridor_right.header.frame_id = "world"; + corridor_right.header.stamp = rclcpp::Time(stamp); + auto& poses_r = corridor_right.poses; + + // iterate through 
the corridor paths + geometry_msgs::msg::Pose test_pose_l; + geometry_msgs::msg::Pose test_pose_r; + for (unsigned i = 0; i < corridor_ptr->x_left.size(); i++) + { + //lhs + auto& pose_l = poses_l.emplace_back(); + test_pose_l.position.x = corridor_ptr->x_left[i]; + test_pose_l.position.y = corridor_ptr->y_left[i]; + test_pose_l.position.z = 0.0; // setting this 0.0 for now for flat world assumption, but long term we might want to add a z component + test_pose_l.orientation.x = 0.0; + test_pose_l.orientation.y = 0.0; + test_pose_l.orientation.z = 0.0; + test_pose_l.orientation.w = 1.0; + pose_l.pose = test_pose_l; + + // rhs + auto& pose_r = poses_r.emplace_back(); + test_pose_r.position.x = corridor_ptr->x_right[i]; + test_pose_r.position.y = corridor_ptr->y_right[i]; + test_pose_r.position.z = 0.0; // setting this 0.0 for now for flat world assumption, but long term we might want to add a z component + test_pose_r.orientation.x = 0.0; + test_pose_r.orientation.y = 0.0; + test_pose_r.orientation.z = 0.0; + test_pose_r.orientation.w = 1.0; + pose_r.pose = test_pose_r; + } + + corridor_pub_l_->publish(corridor_left); + corridor_pub_r_->publish(corridor_right); + } return; } diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index f9442bab2..e9e7f550b 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -28,7 +28,7 @@ namespace { inline std::tuple T2xyzrpy( const vtr::tactic::EdgeTransform& T) { const auto Tm = T.matrix(); - return std::make_tuple(Tm(0, 3), Tm(1, 3), Tm(2,3), std::atan2(Tm(2, 1), Tm(2, 2)), std::atan2(-1*Tm(2, 0), sqrt(pow(Tm(2, 1),2) + pow(Tm(2, 2),2))), std::atan2(Tm(1, 0), Tm(0, 0))); + return std::make_tuple(Tm(0, 3), Tm(1, 3), Tm(2,3), std::atan2(Tm(2, 1), Tm(2, 2)), std::atan2(-1.0*Tm(2, 0), sqrt(pow(Tm(2, 1),2) + pow(Tm(2, 2),2))), std::atan2(Tm(1, 0), Tm(0, 0))); } } @@ -44,7 
+44,7 @@ auto CBITPlanner::getChainInfo(vtr::path_planning::BasePathPlanner::RobotState& } // Class Constructor: -CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr) +CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr) { // Setting random seed @@ -78,7 +78,7 @@ CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, //initialize_plot(); InitializePlanningSpace(); - Planning(robot_state, costmap_ptr); + Planning(robot_state, costmap_ptr, corridor_ptr); // DEBUG CODE, ROBOT STATE UPDATE QUERY EXAMPLE /* @@ -142,7 +142,7 @@ void CBITPlanner::InitializePlanningSpace() } // Reset fuction which goes through the entire reset procedure (including calling ResetPlanner and restarting the planner itself) -void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr) +void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr) { // I think we may also want to add a small time delay just so we arent repeatedly spamming an optimal solution CLOG(ERROR, "path_planning.cbit_planner") << "Plan could not be improved, Initiating Reset"; @@ -205,7 +205,7 @@ void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& rob CLOG(INFO, "path_planning.cbit_planner") << "The p,q coordinate of the robots goal is now: p: " << p_goal->p << " q: " << p_goal->q; CLOG(INFO, "path_planning.cbit_planner") << "The p,q coordinate of the robots start is now: p: " << p_start->p << " q: " << p_start->q << " g_T: " << p_start->g_T; - Planning(robot_state, costmap_ptr); + Planning(robot_state, costmap_ptr, corridor_ptr); } // If we ever 
exit the planner due to a fault, we will do a hard reset, everything but the current robot_state (p_goal) and the inputs will be reinitialized @@ -237,7 +237,7 @@ void CBITPlanner::ResetPlanner() // Main planning function -void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr) +void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr) { // find some better places to initialize these double prev_path_cost = INFINITY; // experimental @@ -539,13 +539,14 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // First grab the latest obstacles (could potentially take this opportunity to make a more compact approximate obs representation) // Collision Check the batch solution: TODO:, will need to make wormhole modifcations long term - std::shared_ptr col_free_vertex = col_check_path_v2(); // outputs NULL if no collision + std::shared_ptr col_free_vertex = col_check_path_v2((p_goal->p + conf.sliding_window_width + conf.sliding_window_freespace_padding)); // outputs NULL if no collision if (col_free_vertex != nullptr) { // If there is a collision, prune the tree of all vertices to the left of the this vertex CLOG(WARNING, "path_planning.cbit_planner") << "Collision Detected:"; CLOG(WARNING, "path_planning.cbit_planner") << "Collision Free Vertex is - p: " << col_free_vertex->p << " q: " << col_free_vertex->q; - + + // Vertex Prune (maintain only vertices to the right of the collision free vertex) std::vector> pruned_vertex_tree; pruned_vertex_tree.reserve(tree.V.size()); @@ -558,7 +559,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo } tree.V = pruned_vertex_tree; - // Edge Prune (meaintain only edges to the right of the collision free vertex) + // Edge Prune (maintain only edges to the right of the collision free vertex) std::vector, std::shared_ptr>> 
pruned_edge_tree; pruned_edge_tree.reserve(tree.E.size()); for (int i = 0; i (p_start)); + // End of experimental // Reset the goal, and add it to the samples p_goal->parent = nullptr; @@ -592,6 +601,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Sample free-space wont generate pre-seeds, so we should generate our own in the portion of the tree that was dropped (col_free_vertex.p to p_goal.p) int pre_seeds = abs(col_free_vertex->p - p_goal->p) / 0.25; // Note needed to change p_goal to p_zero. When the sliding window padding is large, pre-seeds wont get generated all the way to the goal + //int pre_seeds = abs(p_start->p - p_goal->p) / 0.25; // EXPERIMENTAL< DONT FORGET TO REMOVE THIS AND UNCOMMENT ABOVE IF NOT USING THE FULL RESET METHOD double p_step = 0.25; double p_val = p_goal->p; for (int i = 0; i < (pre_seeds-1); i++) @@ -607,6 +617,12 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Reset the free space flag in sample freespace //CLOG(WARNING, "path_planning.cbit_planner") << "No Collision:"; repair_mode = false; + + + // EXPERIMENTAL: Updating the dynamic corridor (now that we know our path is collision free): + //update_corridor(corridor_ptr, path_x, path_y, *p_goal); + + } } @@ -691,6 +707,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo if (p_goal->g_T != INFINITY) { conf.initial_exp_rad = exp_radius((tree.V.size() + samples.size()), sample_box_height, sample_box_width, conf.eta); + CLOG(DEBUG, "path_planning.cbit_planner") << "New expansion radius is: " << conf.initial_exp_rad; } @@ -1684,7 +1701,6 @@ bool CBITPlanner::col_check_path() tree.E = pruned_edge_tree; - // TODO: potentially check if plotting is enabled, and if so, clear the current plot and basically reset and replot all the current samples, vertices, edges // (Might be a good idea to just make a function for this, it would probably come in handy) @@ -1716,7 +1732,7 @@ bool 
CBITPlanner::col_check_path() } -std::shared_ptr CBITPlanner::col_check_path_v2() +std::shared_ptr CBITPlanner::col_check_path_v2(double max_lookahead_p) { // Generate path to collision check @@ -1734,15 +1750,30 @@ std::shared_ptr CBITPlanner::col_check_path_v2() std::shared_ptr col_free_vertex = nullptr; // TODO, actually we probably only want to consider edges in the lookahead (not way far away down the path) but I might deal with this later - for (int i = curv_path.size()-1; i>=0; i--) + for (int i = curv_path.size()-1; i>=0; i--) // I decided to have it -3 instead of -1, this prevents us from collision checking the end of path, but I think I want that for now and it lets me in { Node vertex = *curv_path[i]; + // If the vertex in the path is outside of our sliding window then skip it + // This prevents us from having to rewire the path for distance obstacles outside of our window (especially in cases where our path crosses itself) + if (vertex.p > max_lookahead_p) + { + continue; + } Node euclid_pt = curve_to_euclid(vertex); //if (is_inside_obs(obs_rectangle, euclid_pt)) // Legacy collision checking if (costmap_col_tight(euclid_pt)) { - col_free_vertex = curv_path[i+1]; // take the vertex just before the collision vertex + //col_free_vertex = curv_path[i+1]; // take the vertex just before the collision vertex + if (i+4 < curv_path.size()-1) + { + col_free_vertex = curv_path[i+4]; // im actually finding that this vertex may be a little too close to the obstacles. 
Better to take one slightly further ahead if we can + } + else + { + col_free_vertex = curv_path[i+1]; + } } + } return col_free_vertex; } @@ -1957,108 +1988,68 @@ bool CBITPlanner::is_inside_obs(std::vector> obs, Node node) return false; } -// DEBUG: Experimental costmap collision checking -bool CBITPlanner::costmap_col_tight(Node node) -{ - // DEBUG: Make a spoofed obstacle to add to the costmap to make sure collision detection works - //std::pair fake_key(4.0, 0.0); - //float fake_value = 1.0; - //cbit_costmap_ptr->obs_map.insert(std::make_pair(fake_key,fake_value)); +// This collision check is only used at the end of each batch and determines whether the path should be rewired using the bare minimum obstacle distance +// Under normal operation we plan paths around a slightly more conservative buffer around each obstacle (equal to influence dist + min dist) +bool CBITPlanner::costmap_col_tight(Node node) +{ //CLOG(DEBUG, "path_planning.cbit_planner") << "Original Node: x: " << node.p << " y: " << node.q << " z: " << node.z; - Eigen::Matrix test_pt({node.p, node.q, node.z, 1}); - - - // Experimental, temporal filter (iterate through a sliding window of collision maps) - // For the moment what it does is collision check a history of 5 maps, and if any of those maps result in a collision, return collision - // Should make fluttery obstacles stay in place for about a second now, but the downside is false positives will also hang around, so may need to deal with this still - // Maybe by taking a vote? 
idk lets see how fast this is first - - //bool collision_result = false; - int vote_counter = 0; + // I am no longer temporally filtering here, instead this takes place in the costmap itself in the change detection module + // This means the costmap and transform size should never be more than 1 for (int i = 0; i < cbit_costmap_ptr->T_c_w_vect.size(); i++) { - //auto collision_pt = cbit_costmap_ptr->T_c_w * test_pt; - auto collision_pt = cbit_costmap_ptr->T_c_w_vect[i] * test_pt; - - //CLOG(DEBUG, "path_planning.cbit_planner") << "Displaying the point in the costmap frame we are trying to collision check: " << collision_pt; - //CLOG(DEBUG, "path_planning.cbit_planner") << "X: " << collision_pt[0]; - //CLOG(DEBUG, "path_planning.cbit_planner") << "Y: " << collision_pt[1]; - //CLOG(DEBUG, "path_planning.cbit_planner") << "Resolution: " << cbit_costmap_ptr->grid_resolution; + auto collision_pt = cbit_costmap_ptr->T_c_w_vect[i] * test_pt; // Round the collision point x and y values down to the nearest grid resolution so that it can be found in the obstacle unordered_map float x_key = floor(collision_pt[0] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; float y_key = floor(collision_pt[1] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; - //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; //CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; float grid_value; // Check to see if the point is in the obstacle map - // Note may just make more sense to bring in the returns to this try/catch - try { + // We need to use a try/catch in this metod as if the key value pair doesnt exist (it usually wont) we catch an out of range error + try + { // Block of code to try grid_value = cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key, y_key)); //CLOG(ERROR, "path_planning.cbit_planner") << "Key Value: " << grid_value; } - catch (std::out_of_range) { - // Block of code to handle errors + catch 
(std::out_of_range) + { grid_value = 0.0; - //CLOG(ERROR, "path_planning.cbit_planner") << "Something went wrong in collision check!!!"; } - if (grid_value > 0.0) + if (grid_value >= 0.89) // By switching this from > 0.0 to 0.9, we effectively only collision check the path out to the "minimum_distance" obs config param { - //vote_counter = vote_counter + 1; //debug, switching to external filter return true; } - - //debug, switching to external filter - //if (vote_counter >= 3)// Magic number for now - //{ - // return true; - //} - } + + // If we make it here can return false for no collision return false; } -// DEBUG: Experimental costmap collision checking + +// More conservative costmap checking out to a distance of "influence_distance" + "minimum_distance" away bool CBITPlanner::costmap_col(Node node) { - - // DEBUG: Make a spoofed obstacle to add to the costmap to make sure collision detection works - //std::pair fake_key(4.0, 0.0); - //float fake_value = 1.0; - //cbit_costmap_ptr->obs_map.insert(std::make_pair(fake_key,fake_value)); - //CLOG(DEBUG, "path_planning.cbit_planner") << "Original Node: x: " << node.p << " y: " << node.q << " z: " << node.z; - Eigen::Matrix test_pt({node.p, node.q, node.z, 1}); - - - // Experimental, temporal filter (iterate through a sliding window of collision maps) - // For the moment what it does is collision check a history of 5 maps, and if any of those maps result in a collision, return collision - // Should make fluttery obstacles stay in place for about a second now, but the downside is false positives will also hang around, so may need to deal with this still - // Maybe by taking a vote? 
idk lets see how fast this is first - - //bool collision_result = false; - int vote_counter = 0; for (int i = 0; i < cbit_costmap_ptr->T_c_w_vect.size(); i++) { //auto collision_pt = cbit_costmap_ptr->T_c_w * test_pt; auto collision_pt = cbit_costmap_ptr->T_c_w_vect[i] * test_pt; - //CLOG(DEBUG, "path_planning.cbit_planner") << "Displaying the point in the costmap frame we are trying to collision check: " << collision_pt; //CLOG(DEBUG, "path_planning.cbit_planner") << "X: " << collision_pt[0]; //CLOG(DEBUG, "path_planning.cbit_planner") << "Y: " << collision_pt[1]; @@ -2069,130 +2060,88 @@ bool CBITPlanner::costmap_col(Node node) float x_key = floor(collision_pt[0] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; float y_key = floor(collision_pt[1] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; - // EXPERIMENTAL - // want to explore also collision checking a small region around the actual x,y key too during active collision checking, this will prevent excessive rewiring - // It does increase the number of collision checks by a factor of 9 though, so might want to consider other options this was just the quick and dirty top of my head way - std::vector x_key_arr; - std::vector y_key_arr; - x_key_arr.clear(); - y_key_arr.clear(); - // just going to hard code for now because im tired and its getting late, dont leave this - x_key_arr.push_back(x_key); - y_key_arr.push_back(y_key); - /* - x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution); - y_key_arr.push_back(y_key); - - x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution); - y_key_arr.push_back(y_key); - - x_key_arr.push_back(x_key); - y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution); + //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; - x_key_arr.push_back(x_key); - y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution); + float 
grid_value; + // Check to see if the point is in the obstacle map + // If it isnt in the map we will catch an out of range error + try + { + grid_value = cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key, y_key)); + //CLOG(DEBUG, "debug") << " The Grid value for this cell is: " << grid_value; + } - x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution); - y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution); - - x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution); - y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution); - - x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution); - y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution); - - x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution); - y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution); - */ - - // second layer - /* - x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution*2.0); - y_key_arr.push_back(y_key); - - x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution*2.0); - y_key_arr.push_back(y_key); - - x_key_arr.push_back(x_key); - y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution*2.0); - - x_key_arr.push_back(x_key); - y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution*2.0); + catch (std::out_of_range) + { - x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution*2.0); - y_key_arr.push_back(y_key + cbit_costmap_ptr->grid_resolution*2.0); + grid_value = 0.0; + } - x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution*2.0); - y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution*2.0); + if (grid_value > 0.0) + { + return true; + } - x_key_arr.push_back(x_key + cbit_costmap_ptr->grid_resolution*2.0); - y_key_arr.push_back(y_key - cbit_costmap_ptr->grid_resolution*2.0); + } + // If we make it here, return false for no collision + return false; +} - x_key_arr.push_back(x_key - cbit_costmap_ptr->grid_resolution*2.0); - y_key_arr.push_back(y_key + 
cbit_costmap_ptr->grid_resolution*2.0); - */ +// note with new collision checker from global costmap I dont think we use this obs anymore? +bool CBITPlanner::discrete_collision(std::vector> obs, double discretization, Node start, Node end) +{ + // We dynamically determine the discretization based on the length of the edge + discretization = round(calc_dist(start, end) * discretization); + // Generate discretized test nodes + std::vector p_test; + std::vector q_test; - //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; - //CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; + double p_step = fabs(end.p - start.p) / discretization; + double q_step = fabs(end.q - start.q) / discretization; + + p_test.push_back(start.p); + q_test.push_back(start.q); - float grid_value; + for (int i = 0; i < discretization-1; i++) + { + p_test.push_back(p_test[i] + p_step*sgn(end.p-start.p) ); + q_test.push_back(q_test[i] + q_step*sgn(end.q-start.q) ); + } + p_test.push_back(end.p); + q_test.push_back(end.q); - // Check to see if the point is in the obstacle map - // Note may just make more sense to bring in the returns to this try/catch - try { - // Block of code to try - //grid_value = cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key, y_key)); - - //CLOG(ERROR, "path_planning.cbit_planner") << "Key Value: " << grid_value; - // conservative buffer region: - for (int j=0; j < x_key_arr.size(); j++) - { + // Loop through the test curvilinear points, convert to euclid, collision check obstacles + for (int i = 0; i < p_test.size(); i++) + { + Node curv_pt = Node(p_test[i], q_test[i]); - // for some reason this commented out bit isnt working and we have to rely on the try catch loop, which I dont understand why, need to do some debugging still - //CLOG(ERROR, "path_planning.cbit_planner") << " X_key: " << x_key_arr[j] << " Y key: " << y_key_arr[j]; - //if ((cbit_costmap_ptr->obs_map_vect[i]).find(std::pair (x_key_arr[j], y_key_arr[j])) != 
(cbit_costmap_ptr->obs_map_vect[i]).end()) - //{ - //CLOG(ERROR, "path_planning.cbit_planner") << " Entered Loop "; - grid_value = cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key_arr[j], y_key_arr[j])); - if (grid_value > 0.0) - { - //vote_counter = vote_counter + 1; //debug, switching to external filter - //CLOG(ERROR, "path_planning.cbit_planner") << "COLLISION DETECTED"; + // Convert to euclid TODO: + //Node euclid_pt = curv_pt; // DEBUG DO NOT LEAVE THIS HERE, NEED TO REPLACE WITH COLLISION CHECK FUNCTION + Node euclid_pt = curve_to_euclid(curv_pt); + //if (is_inside_obs(obs, euclid_pt)) + if (costmap_col(euclid_pt)) + { return true; - } - //} - } - } - catch (std::out_of_range) { - // Block of code to handle errors - grid_value = 0.0; - CLOG(ERROR, "path_planning.cbit_planner") << "Something went wrong in collision check!!!"; + } } - //if (grid_value > 0.0) - //{ - //vote_counter = vote_counter + 1; //debug, switching to external filter - // return true; - //} - - //debug, switching to external filter - //if (vote_counter >= 3)// Magic number for now - //{ - // return true; - //} - - } - return false; + return false; } -bool CBITPlanner::discrete_collision(std::vector> obs, double discretization, Node start, Node end) +// Corridor Update functions +// DEBUG: Long term when I restructure things I want these to be inside the generate_pq.cpp file +// The only thing keeping me from doing this right away is that all the curvilinear conversions and collision checkers are here +// But long term I should move all those over to utils or even a different header and script + +struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double discretization, Node start, Node end) { // We dynamically determine the discretization based on the length of the edge discretization = round(calc_dist(start, end) * discretization); @@ -2218,9 +2167,10 @@ bool CBITPlanner::discrete_collision(std::vector> obs, doubl // Loop through the test curvilinear points, 
convert to euclid, collision check obstacles + Node curv_pt; for (int i = 0; i < p_test.size(); i++) { - Node curv_pt = Node(p_test[i], q_test[i]); + curv_pt = Node(p_test[i], q_test[i]); // Convert to euclid TODO: //Node euclid_pt = curv_pt; // DEBUG DO NOT LEAVE THIS HERE, NEED TO REPLACE WITH COLLISION CHECK FUNCTION @@ -2228,11 +2178,176 @@ bool CBITPlanner::discrete_collision(std::vector> obs, doubl //if (is_inside_obs(obs, euclid_pt)) if (costmap_col(euclid_pt)) { - return true; + return {true, curv_pt}; } } - return false; + return {false, curv_pt}; +} + +// dont love how this function is coded, could be done much more efficiently with some more effort I think +void CBITPlanner::update_corridor(std::shared_ptr corridor, std::vector homotopy_p, std::vector homotopy_q, Node robot_state) +{ + // Reset q_left and q_right (I did this on the python side, but I dont know why exactly, in theory I should be able to just + // update it incrementally in a sliding window? I must have had a reason for it but ill leave it out for now). + // Ah the reason i did this was because I was not handling the no collision case (in which we reset q_left to q_right for that bin to the max) + // Resetting at the beginning prevents needing to do this, but I think I prefer the way im doing it here. 
+ + // Take a subset of the p_bins based on the current robot state and our dynamic window + std::vector p_bins_subset; + + // Note there is probably a much better way to do this which is faster exploiting that the vector is sorted, but for now just doing this for convenience + for (int i = 0; i < corridor->p_bins.size(); i++) + { + double p_bin = corridor->p_bins[i]; + if ((p_bin >= robot_state.p) && (p_bin <= robot_state.p + corridor->sliding_window_width)) + { + p_bins_subset.push_back(p_bin); + } + // Exit early if the p values become larger then the window + if (p_bin > robot_state.p + corridor->sliding_window_width) + { + break; + } + } + + + // Iterate through each of the subset of p_bins + double p_upper; + double p_lower; + double q_upper; + double q_lower; + for (int i = 0; i < p_bins_subset.size(); i++) + { + int ind_counter = 0; + + // iterate through the current path solution (in p,q space) + for (int j = 0; j < homotopy_p.size(); j++) + { + // If the point on the path is just larger then the p_bin_subset, take that point and the previous one, interpolate a q_bin value at the place + if (homotopy_p[j] >= p_bins_subset[i]) + { + p_upper = homotopy_p[ind_counter]; + p_lower = homotopy_p[ind_counter - 1]; + q_upper = homotopy_q[ind_counter]; + q_lower = homotopy_q[ind_counter - 1]; + break; + } + ind_counter = ind_counter + 1; + } + + double q_bin = q_lower + ((p_bins_subset[i] - p_lower) / (p_upper - p_lower)) * (q_upper - q_lower); + //CLOG(DEBUG, "path_planning.corridor_debug") << "q_bin is: " << q_bin; + Node start = Node(p_bins_subset[i], q_bin); // starting point for collision check + Node end_left = Node(p_bins_subset[i], corridor->q_max + 0.01); // end point for 1st collision check + a small buffer + Node end_right = Node(p_bins_subset[i], (-1.0 * corridor->q_max - 0.01)); // end point for 2nd collision check + a small buffer + //CLOG(DEBUG, "path_planning.corridor_debug") << "start node is p: " << start.p << " q: " << start.q; + //CLOG(DEBUG, 
"path_planning.corridor_debug") << "end_left node is p: " << end_left.p << " q: " << end_left.q; + + // Note long term we need to handle special case when the start point is in a wormhole region, but for now should be fine + //Node euclid_start = CBITPlanner::curve_to_euclid(start); + + + // debug, testing to see how often the bit* point is in a collision by the time we get here (in theory this should never happen but I think it is) + auto test_check = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, start); + if (test_check.bool_result == true) + { + CLOG(WARNING, "path_planning.corridor_debug") << "Something has gone wrong, cbit path has a collision, ignoring this p_bin update"; + break; + } + + // collision check left and right using a special version of discrete_collision check + // In this version we output both a boolean and the 1st point that comes into collision if there is one + auto collision_check_result1 = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, end_left); + auto collision_check_result2 = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, end_right); + + // if there is a collision, set q_left at the location of the current p_bin being processed to the value of q_left/q_right + if (collision_check_result1.bool_result == true) + { + //CLOG(DEBUG, "path_planning.corridor_debug") << "start node is p: " << start.p << " q: " << start.q; + //CLOG(DEBUG, "path_planning.corridor_debug") << "end_left node is p: " << end_left.p << " q: " << end_left.q; + double q_left = collision_check_result1.col_node.q; + auto it = find(corridor->p_bins.begin(), corridor->p_bins.end(), p_bins_subset[i]); + if (it != corridor->p_bins.end()) + { + int index = it - corridor->p_bins.begin(); + corridor->q_left[index] = q_left; + CLOG(DEBUG, "path_planning.corridor_debug") << "Q_left is: " << q_left; + } + } + // else set it back to the maximums + else + { + double q_left = corridor->q_max; + auto it = 
find(corridor->p_bins.begin(), corridor->p_bins.end(), p_bins_subset[i]); + if (it != corridor->p_bins.end()) + { + int index = it - corridor->p_bins.begin(); + corridor->q_left[index] = q_left; + } + } + + // Repeat for the other side + + if (collision_check_result2.bool_result == true) + { + //CLOG(DEBUG, "path_planning.corridor_debug") << "start node is p: " << start.p << " q: " << start.q; + //CLOG(DEBUG, "path_planning.corridor_debug") << "end_right node is p: " << end_right.p << " q: " << end_right.q; + double q_right = collision_check_result2.col_node.q; + auto it = find(corridor->p_bins.begin(), corridor->p_bins.end(), p_bins_subset[i]); + if (it != corridor->p_bins.end()) + { + int index = it - corridor->p_bins.begin(); + corridor->q_right[index] = q_right; + CLOG(DEBUG, "path_planning.corridor_debug") << "Q_right is: " << q_right; + } + } + else + { + double q_right = -1.0 * corridor->q_max; + auto it = find(corridor->p_bins.begin(), corridor->p_bins.end(), p_bins_subset[i]); + if (it != corridor->p_bins.end()) + { + int index = it - corridor->p_bins.begin(); + corridor->q_right[index] = q_right; + } + } + + } + + + // Updating the full euclidean corridor vectors by iterating through all bins. + + // TODO: Make whether we do this a configurable parameter, I realised that I dont actually need this from the control perspective at all + // Its purely for visualization purposes and it will waste some compute (not much though so im not too concerned for everyday use) + + // Note for convenience I can do this in a separate loop, but really we could also be doing this incrementally the same way we update + // q_left/q_right. This requires a proper initialization of the euclid corridor in generate_pq.cpp, which is certainly possible + // But not currently because the curve_to_euclid is not in the utils package. When I change that we can do this. Until then, brute force it is. 
+ + // Benchmarking the compute time for this operation since its likely much less efficient then it could be with the incremental approach + // Need to first clear out the old corridor, otherwise it just keeps stacking + corridor->x_left.clear(); + corridor->y_left.clear(); + corridor->x_right.clear(); + corridor->y_right.clear(); + //auto corridor_start_time = std::chrono::high_resolution_clock::now(); + for (int i = 0; i < corridor->p_bins.size(); i++) + { + Node euclid_left = curve_to_euclid(Node(corridor->p_bins[i],corridor->q_left[i])); + Node euclid_right = curve_to_euclid(Node(corridor->p_bins[i],corridor->q_right[i])); + + //CLOG(ERROR, "path_planning.corridor_debug") << "Euclid Left is x: " << euclid_left.p << " y: " << euclid_right.p; + corridor->x_left.push_back(euclid_left.p); + corridor->y_left.push_back(euclid_left.q); + corridor->x_right.push_back(euclid_right.p); + corridor->y_right.push_back(euclid_right.q); + } + + //auto corridor_stop_time = std::chrono::high_resolution_clock::now(); + //auto duration_corridor = std::chrono::duration_cast(corridor_stop_time - corridor_start_time); + //CLOG(ERROR, "path_planning.corridor_debug") << "Corridor Update Time: " << duration_corridor.count() << "ms"; + } diff --git a/main/src/vtr_path_planning/src/cbit/generate_pq.cpp b/main/src/vtr_path_planning/src/cbit/generate_pq.cpp index ce74dc106..bc866773e 100644 --- a/main/src/vtr_path_planning/src/cbit/generate_pq.cpp +++ b/main/src/vtr_path_planning/src/cbit/generate_pq.cpp @@ -43,7 +43,7 @@ CBITPath::CBITPath(CBITConfig config, std::vector initial_path) p.push_back(p[i-1] + delta_p_calc(disc_path[i-1], disc_path[i], alpha)); } - CLOG(INFO, "path_planning.teb") << "Successfully Built a Path in generate_pq.cpp and Displayed log"; + CLOG(INFO, "path_planning.cbit") << "Successfully Built a Path in generate_pq.cpp and Displayed log"; } @@ -76,4 +76,47 @@ void CBITPath::spline_curvature() // TODO auto test = 1; +} + + +// Corridor Path constructor: 
+CBITCorridor::CBITCorridor(CBITConfig config, std::shared_ptr global_path_ptr) +{ + q_max = config.q_max; + sliding_window_width = config.sliding_window_width + config.sliding_window_freespace_padding; + curv_to_euclid_discretization = config.curv_to_euclid_discretization; + double length_p = global_path_ptr->p.back(); + int num_bins = ceil(length_p / config.corridor_resolution); + // Initialize vector lengths + p_bins.reserve(num_bins); + q_left.reserve(num_bins); + q_right.reserve(num_bins); + x_left.reserve(num_bins); + x_right.reserve(num_bins); + y_left.reserve(num_bins); + y_right.reserve(num_bins); + + // Initialize bins + p_bins = linspace(0, length_p, num_bins); + for (int i = 0; i < num_bins; i++) + { + q_left.push_back(config.q_max); + q_right.push_back(-1.0 * config.q_max); + } + + // I think its wise if here we also initialize the euclidean corridor points as well. This is annoying though because out curv to euclid + // is not in utils (yet). TODO I really should move all the collision checking things external and independant from the planner for this reason + + // For now I may instead brute force the euclid generate at the end of each corridor update, I feel like even a 50000 bin loop (2.5km) wouldnt take + // more then a few ms (I believe early experiments showed you could do like 4million loop iterations every second or so in c++) + + // debug prints to make sure this happened correctly + CLOG(DEBUG, "path_planning.corridor_debug") << "Length of P is: " << length_p; + CLOG(DEBUG, "path_planning.corridor_debug") << "Number of Bins is: " << num_bins; + CLOG(DEBUG, "path_planning.corridor_debug") << "P_bins are: " << p_bins; + CLOG(DEBUG, "path_planning.corridor_debug") << "Q_left is: " << q_left; + CLOG(DEBUG, "path_planning.corridor_debug") << "Q_right is: " << q_right; + CLOG(DEBUG, "path_planning.corridor_debug") << "Size of p_bins: " << p_bins.size(); + CLOG(DEBUG, "path_planning.corridor_debug") << "Size of q_left,q_right: " << q_left.size(); + 
} \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/cbit/utils.cpp b/main/src/vtr_path_planning/src/cbit/utils.cpp index f64725810..fea689a3a 100644 --- a/main/src/vtr_path_planning/src/cbit/utils.cpp +++ b/main/src/vtr_path_planning/src/cbit/utils.cpp @@ -224,6 +224,7 @@ double exp_radius(double q, double sample_box_height, double sample_box_width, d } // Moving this to cbit.cpp so I dont need to pass the whole giant path vector every time I call +// though actually I think long term I may just want to pass the pointer and still do it in utils, I want to clean up what is in cbitplanner /* // Function for converting a p,q coordinate value into a euclidean coordinate using the pre-processed path to follow Node curve_to_euclid(Node node, std::vector<) diff --git a/main/src/vtr_path_planning/src/mpc/mpc_path_planner.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner.cpp index db9b5cc25..561006f2b 100644 --- a/main/src/vtr_path_planning/src/mpc/mpc_path_planner.cpp +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner.cpp @@ -13,7 +13,7 @@ // limitations under the License. 
/** - * \file mpc_path_planner.cpp + * \file mpc.cpp * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) */ #include "vtr_path_planning/mpc/mpc_path_planner.hpp" @@ -45,6 +45,15 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 0, 0, 0, -1; + // Lateral constraint projection matrices (Experimental) + Eigen::Matrix I_4; + I_4 << 0, + 0, + 0, + 1; + Eigen::Matrix I_2_tran; + I_2_tran << 0, 1, 0, 0; + // Setup shared loss functions and noise models for all cost terms const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); @@ -61,6 +70,12 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 const auto sharedKinNoiseModel = steam::StaticNoiseModel<6>::MakeShared(kin_noise_vect); + // Experimental lat constraint (hardcoded for now) + Eigen::Matrix lat_noise_vect; + lat_noise_vect << 1.0; + const auto sharedLatNoiseModel = + steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); + // Generate STEAM States for the velocity vector and SE3 state transforms @@ -74,12 +89,13 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 // Pushback the initial states (current robot state) pose_states.push_back(T0_inv); // Change this to T0 when implementing on robot, T0_1 for debug //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() - vel_states.push_back(v0); + vel_states.push_back(v0); // Set the remaining states for (int i=0; i previous_vel, lgmath::se3 for (int i=0; i previous_vel, lgmath::se3 //const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, sharedLossFunc); //opt_problem.addCostTerm(vel_cost_term); + + + // Kinematic constraints (softened but penalized heavily) + if (i < (K-1)) + { + const auto lhs = steam::se3::ComposeInverseEvaluator::MakeShared(pose_state_vars[i+1], pose_state_vars[i]); + const auto vel_proj = 
steam::vspace::MatrixMultEvaluator<6,2>::MakeShared(vel_state_vars[i], P_tran); // TODO, I guess this version of steam doesnt have this one, will need to do it myself + const auto scaled_vel_proj = steam::vspace::ScalarMultEvaluator<6>::MakeShared(vel_proj, DT); + const auto rhs = steam::se3::ExpMapEvaluator::MakeShared(scaled_vel_proj); + const auto kin_error_func = steam::se3::LogMapEvaluator::MakeShared(steam::se3::ComposeInverseEvaluator::MakeShared(lhs, rhs)); + const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(kin_cost_term); + + // Experimental velocity set-point constraint (instead of non zero velocity penalty) // Only add this cost term if we are not in point stabilization mode (end of path) if (point_stabilization == false) @@ -125,7 +161,9 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 opt_problem.addCostTerm(vel_cost_term); } + // Experimental acceleration limits + if (i == 0) { // On the first iteration, we need to use an error with the previously applied control command state @@ -138,27 +176,30 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 const auto accel_diff = steam::vspace::AdditionEvaluator<2>::MakeShared(vel_state_vars[i], steam::vspace::NegationEvaluator<2>::MakeShared(vel_state_vars[i-1])); const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, sharedLossFunc); opt_problem.addCostTerm(accel_cost_term); - } + } + + } - // Kinematic constraints (softened but penalized heavily) - if (i < (K-1)) - { - const auto lhs = steam::se3::ComposeInverseEvaluator::MakeShared(pose_state_vars[i+1], pose_state_vars[i]); - const auto vel_proj = steam::vspace::MatrixMultEvaluator<6,2>::MakeShared(vel_state_vars[i], P_tran); // TODO, I guess this version of steam doesnt have this one, will need to do it myself - const auto scaled_vel_proj = 
steam::vspace::ScalarMultEvaluator<6>::MakeShared(vel_proj, DT); - const auto rhs = steam::se3::ExpMapEvaluator::MakeShared(scaled_vel_proj); - const auto kin_error_func = steam::se3::LogMapEvaluator::MakeShared(steam::se3::ComposeInverseEvaluator::MakeShared(lhs, rhs)); - const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, sharedLossFunc); - opt_problem.addCostTerm(kin_cost_term); - } + + // Experimental Hard Lateral Constraints + /* + CLOG(DEBUG, "debug") << "Attempting to add a lateral cost term"; + const auto inv_state = steam::se3::InverseEvaluator::MakeShared(pose_state_vars[i]); + const auto pose_err = steam::vspace::MatrixMultEvaluator<4,4>::MakeShared(inv_state, measurements[i]); // I think this line is the problem, we cannot really convert the se3 evaluators to vspace evaluators + const auto left_mul = steam::vspace::MatrixMultEvaluator<1,4>::MakeShared(pose_err, I_2_tran); + const auto right_mul = steam::vspace::MatrixMultEvaluator<4,1>::MakeShared(pose_err, I_4); + const auto lat_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(right_mul, sharedLatNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(lat_cost_term); + CLOG(DEBUG, "debug") << "Successfully added Lat cost term"; + */ } // Solve the optimization problem with GuassNewton solver using SolverType = steam::GaussNewtonSolver; // Initialize parameters (enable verbose mode) SolverType::Params params; - params.verbose = false; // Makes the output display for debug + params.verbose = false; // Makes the output display for debug when true SolverType solver(opt_problem, params); solver.optimize(); @@ -242,152 +283,139 @@ struct meas_result GenerateReferenceMeas(std::shared_ptr> cbit // Save a copy of the current path solution to work on auto cbit_path = *cbit_path_ptr; + // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) - // Take in the current euclidean path solution from the 
cbit planner in the world frame, the current robot state, and determine - // which measurements we wish to track to follow the path at the desired target velocity - CLOG(DEBUG, "mpc_debug.cbit") << "Reference Path Points:"; - std::vector measurements; - double starting_dist = INFINITY; + // PSEUDO CODE: + // 1. Find the closest point on the cbit path to the current state of the robot + // 2. Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) + // 3. After we have our starting closest point on the path, assign that point a p value of 0. Compute the p values for each subsequent point in the lookahead window + // 4. using the desired p values, and the known p values, interpolate a, x,y,z,yaw, value each measurement + // 5. Create the proper measurement transform for each measurement and get it ready for the using with the optimization problem + + // Limiting the size of the cbit path based on the sliding window and then assigning p values + double lookahead_dist = 0.0; + double p_dist = 0.0; + + double min_dist = INFINITY; double new_dist; double dx; double dy; - double delta_dist = 0; - double index_counter = 0; - - // Find closest point on the path to the current state (new version, experimental) - double min_dist = INFINITY; - CLOG(DEBUG, "mpc_debug.cbit") << "Total CBIT Path Size: " << cbit_path.size(); - for (int i = 0; i < 200; i++) // magic number for now, corresponds to about a 5m lookahead (need to remove dependency on this) - { - // handle end of path case: - if (i >= cbit_path.size()) - { - break; - } - CLOG(DEBUG, "mpc_debug.cbit") << "x: " << (cbit_path)[i].x << "y: " << (cbit_path)[i].y << "z: " << (cbit_path)[i].z; + double p_correction = 0.0; + + std::vector cbit_p; + cbit_p.reserve(cbit_path.size()); + cbit_p.push_back(0.0); + for (int i = 0; i < (cbit_path.size()-2); i++) // the last value of vector is size()-1, so second to last will be size-2 + { + // calculate the p value 
for the point + p_dist = sqrt((((cbit_path)[i].x - (cbit_path)[i+1].x) * ((cbit_path)[i].x - (cbit_path)[i+1].x)) + (((cbit_path)[i].y - (cbit_path)[i+1].y) * ((cbit_path)[i].y - (cbit_path)[i+1].y))); + lookahead_dist = lookahead_dist + p_dist; + cbit_p.push_back(lookahead_dist); + + // Keep track of the closest point to the robot state dx = (cbit_path)[i].x - std::get<0>(robot_pose); dy = (cbit_path)[i].y - std::get<1>(robot_pose); new_dist = sqrt((dx * dx) + (dy * dy)); - CLOG(DEBUG, "mpc_debug.cbit") << "Dist to Pt: " << new_dist; if (new_dist < min_dist) { CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; - index_counter = i; // I wonder if we should add some buffer, as this point could still be behind the current robot + p_correction = lookahead_dist; min_dist = new_dist; } - } - /* - // Find closest point on the path to the current state (Old version, mostly worked, but I think if the cbit path was not smooth there was a chance it would exit early before finding closest point) - while (delta_dist >= 0) - { - CLOG(DEBUG, "mpc_debug.cbit") << "x: " << (cbit_path)[index_counter].x << "y: " << (cbit_path)[index_counter].y << "z: " << (cbit_path)[index_counter].z; - dx = (cbit_path)[index_counter].x - std::get<0>(robot_pose); - dy = (cbit_path)[index_counter].y - std::get<1>(robot_pose); - new_dist = sqrt((dx * dx) + (dy * dy)); - delta_dist = starting_dist - new_dist; - CLOG(DEBUG, "mpc_debug.cbit") << "Dist to Pt: " << new_dist; - CLOG(DEBUG, "mpc_debug.cbit") << "Delta Dist: " << delta_dist; - if (delta_dist >= 0) - { - starting_dist = new_dist; - index_counter = index_counter + 1; - } - else + + // Stop once we get about 12m ahead of the robot (magic number for now, but this is just a conservative estimate of any reasonable lookahead window and mpc horizon) + if (lookahead_dist > 12.0) { - CLOG(DEBUG, "mpc_debug.cbit") << "Delta Dist Negative, Return i = " << index_counter-1; - index_counter = index_counter - 1; + break; } } - */ + 
//CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; - // Keep iterating through the rest of the path, storing points in the path as measurements if they maintain an approximate - // forward path velocity of VF (//TODO, may need to also interpolate in some instances if we want to be very particular) - for (int i = index_counter; i < (cbit_path).size(); i++) + // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state + std::vector p_meas_vec; + std::vector measurements; + p_meas_vec.reserve(K); + for (int i = 0; i < K; i++) { - // The first iteration we need to add the closest point to the initial position as a measurement - // Subesequent iterations we want to select points on the path to track carefully based on the desired velocity we want to meet. - - // Reset the measurement distance - double delta_dist = 0.0; - if (index_counter != i) - { - // scan the path into the future until we proceed approximately VF*DT meters forward longitudinally long the path - // The resulting indice of the path will be the one we use for the next measurement - while ((delta_dist <= (VF*DT)) && (i < (cbit_path).size())) // Need to add this and condition to handle end of path case - { - - double prev_x = (cbit_path)[i-1].x; - double prev_y = (cbit_path)[i-1].y; - double next_x = (cbit_path)[i].x; - double next_y = (cbit_path)[i].y; - delta_dist = delta_dist + sqrt(((next_x-prev_x) * (next_x-prev_x)) + ((next_y-prev_y) * (next_y-prev_y))); - i = i + 1; - } - // With the above setup, pretty sure the resulting i will be 1 too far when we break the loop, so we need to decrement it once at the end - i = i-1; - } + p_meas_vec.push_back((i * DT * VF) + p_correction); + } + //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values + // Note this could be combined in 
the previous loop too + bool point_stabilization = false; + for (int i = 0; i < p_meas_vec.size(); i++) + { + // handle end of path case: + // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path + // This will cause the intepolation to return the final cbit_path pose for all measurements past this point + //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "debug") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) + { + p_meas_vec[i] = cbit_p[cbit_p.size()-1]; + point_stabilization = true; // Enable point stabilization configs + CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; + } + lgmath::se3::Transformation meas = InterpolateMeas(p_meas_vec[i], cbit_p, cbit_path); + // add to measurement vector + measurements.push_back(meas); + } - // Derive a yaw direction for each point based on the vector formed between the current point and the previous point on the path - double yaw = std::atan2(((cbit_path)[i].y - (cbit_path)[i-1].y), ((cbit_path)[i].x - (cbit_path)[i-1].x)); - //CLOG(DEBUG, "mpc_debug.cbit") << "The yaw of the path pt is: " << yaw; + return {measurements, point_stabilization}; - // Generate a reference Transformation matrix (ignores roll/pitch) - Eigen::Matrix4d T_ref; - T_ref << std::cos(yaw),-1*std::sin(yaw),0,(cbit_path)[i].x, - std::sin(yaw), std::cos(yaw),0,(cbit_path)[i].y, - 0, 0, 1,(cbit_path)[i].z, - 0, 0, 0, 1; - T_ref = T_ref.inverse().eval(); - measurements.push_back(lgmath::se3::Transformation(T_ref)); + //End of Experimental Changes - // Early break condition when the number of measurements we need is satisfied based on the horizon - if (measurements.size() == K) - { - break; - } - // TODO handle end of path case => will want to repeat the final measurements and 
turn problem into a point stabilization MPC. +} - } - - // If we reach the end of the path without generating enough measurements (approaching end of path), populate the measurements - // with the final point in the path (point stabilization problem) - if (measurements.size() < K) +// function takes in a the cbit path solution with a vector defining hte p axis of the path, and then a desired p_meas +// Then tries to output a euclidean pose interpolated for the desired p_meas. +lgmath::se3::Transformation InterpolateMeas(double p_val, std::vector cbit_p, std::vector cbit_path) +{ + // Find the lower bound of the p values + for (int i = 0; i < cbit_p.size(); i++) + { + if (cbit_p[i] < p_val) { - CLOG(WARNING, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; - - // debug logging messages - /* - CLOG(ERROR, "mpc.cbit") << "Displaying all measurements so far:"; - CLOG(ERROR, "mpc.cbit") << "Size of cbit path is: " << cbit_path.size(); - CLOG(ERROR, "mpc.cbit") << "Size of measurements is: " << measurements.size(); - for (int k = 0; k < measurements.size(); k++) - { - CLOG(ERROR, "mpc.cbit") << "Measurement k: " << k << " is: " << measurements[k]; - } - - CLOG(ERROR, "mpc.cbit") << "The last valid transform was: " << measurements[measurements.size()-1]; - */ - for (int j = measurements.size(); j < K; j++) - { - measurements.push_back(measurements[j-1]); - //CLOG(ERROR, "mpc.cbit") << "Appending this transform to the measurements: " << measurements[j-1]; - } - - return {measurements, true}; + continue; } - else { - return {measurements, false}; - } + double p_lower = cbit_p[i-1]; + double p_upper = cbit_p[i]; + Pose pose_lower = cbit_path[i-1]; + Pose pose_upper = cbit_path[i]; -} + double x_int = pose_lower.x + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .x - pose_lower.x); + double y_int = pose_lower.y + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .y - pose_lower.y); + double z_int = pose_lower.z + ((p_val 
- p_lower) / (p_upper - p_lower)) * (pose_upper .z - pose_lower.z); + + // For yaw we need to be abit careful about sign and angle wrap around + // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts + + double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); + + + // Build the transformation matrix + Eigen::Matrix4d T_ref; + T_ref << std::cos(yaw_int),-1*std::sin(yaw_int),0, x_int, + std::sin(yaw_int), std::cos(yaw_int),0, y_int, + 0, 0, 1, z_int, + 0, 0, 0, 1; + T_ref = T_ref.inverse().eval(); + lgmath::se3::Transformation meas = lgmath::se3::Transformation(T_ref); + + CLOG(DEBUG, "mpc_debug.cbit") << "The measurement Euclidean state is - x: " << x_int << " y: " << y_int << " z: " << z_int << " yaw: " << yaw_int; + return meas; + } + } +} // Simple function for checking that the current output velocity command is saturated between our mechanical velocity limits @@ -409,32 +437,49 @@ Eigen::Matrix SaturateVel(Eigen::Matrix applied_vel, } */ - if (applied_vel(0) >= v_lim) + if (abs(applied_vel(0)) >= v_lim) { - command_lin_x = v_lim; - } - else if (applied_vel(0) <= 0.0) - { - command_lin_x = 0.0; + if (applied_vel(0) > 0.0) + { + command_lin_x = v_lim; + } + else if (applied_vel(0) < 0.0) + { + command_lin_x = -1.0* v_lim; + } } + // Removed for bi-directional control purposes + //else if (applied_vel(0) <= 0.0) + //{ + // command_lin_x = 0.0; + //} else { command_lin_x = applied_vel(0) ; } - if (applied_vel(1) >= w_lim) - { - command_ang_z = w_lim; - } - else if (applied_vel(1) <= -1*w_lim) + if (abs(applied_vel(1)) >= w_lim) { - command_ang_z = -1*w_lim; + if (applied_vel(1) > 0.0) + { + command_ang_z = w_lim; + } + else if (applied_vel(1) < 0.0) + { + command_ang_z = -1.0 * w_lim; + } } + //else if (applied_vel(1) <= -1*w_lim) + //{ + // command_ang_z = -1*w_lim; + //} else { command_ang_z = applied_vel(1) ; } + // Changes for Bi-directional path traversal, we no longer want to saturate commands 
less than 0.0 + saturated_vel << command_lin_x, command_ang_z; return saturated_vel; } @@ -467,6 +512,7 @@ Eigen::Matrix SaturateVel(Eigen::Matrix applied_vel, + \ No newline at end of file diff --git a/rviz/honeycomb.rviz b/rviz/honeycomb.rviz index 150265988..89519667a 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -11,16 +11,16 @@ Panels: - /localization path1 - /curr map loc1/Topic1 - /reference path1 + - /reference path1/Topic1 - /planned path1/Offset1 - /planned path (poses)1/Topic1 - /global path1 - /global path1/Offset1 - /planning change detection scan1 - /planning curr map loc1 - - /Path1 - - /Path2 + - /Corridor Path Left1/Topic1 Splitter Ratio: 0.4507042169570923 - Tree Height: 871 + Tree Height: 865 - Class: rviz_common/Selection Name: Selection - Class: rviz_common/Tool Properties @@ -89,7 +89,7 @@ Visualization Manager: robot planning (extrapolated): Value: false robot planning (extrapolated) mpc: - Value: true + Value: false world: Value: false world (offset): @@ -101,8 +101,22 @@ Visualization Manager: Show Names: true Tree: world: - world (offset): + loc vertex frame: {} + odo vertex frame: + robot: + lidar: + {} + planning frame: + robot planning: + {} + robot planning (extrapolated): + {} + robot planning (extrapolated) mpc: + {} + world (offset): + loc vertex frame (offset): + {} Update Interval: 0 Value: true - Alpha: 1 @@ -351,7 +365,7 @@ Visualization Manager: Head Length: 0.20000000298023224 Length: 1 Line Style: Billboards - Line Width: 0.05000000074505806 + Line Width: 0.07999999821186066 Name: reference path Offset: X: 0 @@ -1174,7 +1188,7 @@ Visualization Manager: Class: rviz_default_plugins/Map Color Scheme: costmap Draw Behind: false - Enabled: true + Enabled: false Name: obstacle cost map Topic: Depth: 5 @@ -1190,7 +1204,7 @@ Visualization Manager: Reliability Policy: Reliable Value: /vtr/change_detection_costmap_updates Use Timestamp: false - Value: true + Value: false - Alpha: 0.699999988079071 Class: 
rviz_default_plugins/Map Color Scheme: raw @@ -1212,6 +1226,62 @@ Visualization Manager: Value: /vtr/filtered_change_detection_costmap_updates Use Timestamp: false Value: true + - Alpha: 1 + Buffer Length: 1 + Class: rviz_default_plugins/Path + Color: 170; 0; 255 + Enabled: true + Head Diameter: 0.30000001192092896 + Head Length: 0.20000000298023224 + Length: 0.30000001192092896 + Line Style: Billboards + Line Width: 0.07999999821186066 + Name: Corridor Path Left + Offset: + X: 0 + Y: 0 + Z: 0 + Pose Color: 255; 85; 255 + Pose Style: None + Radius: 0.029999999329447746 + Shaft Diameter: 0.10000000149011612 + Shaft Length: 0.10000000149011612 + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/corridor_path_left + Value: true + - Alpha: 1 + Buffer Length: 1 + Class: rviz_default_plugins/Path + Color: 170; 0; 255 + Enabled: true + Head Diameter: 0.30000001192092896 + Head Length: 0.20000000298023224 + Length: 0.30000001192092896 + Line Style: Billboards + Line Width: 0.07999999821186066 + Name: Corridor Path Right + Offset: + X: 0 + Y: 0 + Z: 0 + Pose Color: 255; 85; 255 + Pose Style: None + Radius: 0.029999999329447746 + Shaft Diameter: 0.10000000149011612 + Shaft Length: 0.10000000149011612 + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/corridor_path_right + Value: true Enabled: true Global Options: Background Color: 181; 181; 181 @@ -1258,25 +1328,25 @@ Visualization Manager: Views: Current: Class: rviz_default_plugins/Orbit - Distance: 18.444183349609375 + Distance: 19.4349422454834 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: - X: -0.07232196629047394 - Y: 0.004224673379212618 - Z: 0.5842546820640564 + X: 0.722629964351654 + Y: 0.6375535130500793 + Z: -0.5986025333404541 Focal Shape 
Fixed Size: false Focal Shape Size: 0.05000000074505806 Invert Z Axis: false Name: Current View Near Clip Distance: 0.009999999776482582 - Pitch: 1.5697963237762451 + Pitch: 1.3647969961166382 Target Frame: robot Value: Orbit (rviz_default_plugins) - Yaw: 4.775712966918945 + Yaw: 4.070685863494873 Saved: - Class: rviz_default_plugins/Orbit Distance: 111.39698028564453 @@ -1484,7 +1554,7 @@ Window Geometry: Height: 1016 Hide Left Dock: false Hide Right Dock: true - QMainWindow State: 000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c00000030ffc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d0000030f000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000004f9000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + 
QMainWindow State: 000000ff00000000fd0000000400000000000002810000039efc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000009d00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000007400fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000039e000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d0065000000000000000500000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000004b10000039e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -1493,6 +1563,6 @@ Window Geometry: collapsed: false Views: collapsed: true - Width: 1920 - X: 2560 + Width: 1848 + X: 1992 Y: 27 From a66b925f0de386f8dbafa007c247ff7f9aa42d6f Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Sat, 21 Jan 2023 17:01:33 -0500 Subject: [PATCH 08/25] additional debugs and further tuned planner --- config/honeycomb_grizzly_default.yaml | 18 +++++----- .../vtr-gui/src/components/graph/GraphMap.js | 3 +- 
main/src/vtr_lidar/src/path_planning/mpc.cpp | 10 +++--- .../src/cbit/cbit_path_planner.cpp | 33 +++++++++++++------ .../src/mpc/mpc_path_planner.cpp | 10 +++--- main/src/vtr_tactic/src/tactic.cpp | 15 ++++++++- 6 files changed, 58 insertions(+), 31 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 5dc3b1855..5fd21c6f5 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -17,10 +17,10 @@ # "tactic.module.live_mem_manager", # "tactic.module.graph_mem_manager", # path planner - "path_planning", + #"path_planning", #"path_planning.teb", "path_planning.cbit", - "path_planning.cbit_planner", + #"path_planning.cbit_planner", #"mpc.cbit", #"mpc_debug.cbit", #"obstacle_detection.cbit", @@ -59,7 +59,7 @@ graph_projection: origin_lat: 43.78220 # PETAWAWA: 45.8983, # UTIAS: 43.78220 origin_lng: -79.4661 # PETAWA: -77.2829, # UTIAS: -79.4661 - origin_theta: 1.3 + origin_theta: 0.0 scale: 1.0 ############ tactic configuration ############ @@ -72,7 +72,7 @@ task_queue_num_threads: 1 task_queue_size: -1 - route_completion_translation_threshold: 2.0 + route_completion_translation_threshold: 0.5 chain: min_cusp_distance: 1.5 @@ -296,7 +296,7 @@ detection_range: 8.0 search_radius: 0.25 - negprob_threshold: 0.05 # was 0.015 # -1.86 without prior, 0.015 with prior # Jordy: I found I needed to bump this up abit (approx 0.075+) to reduce false positives + negprob_threshold: 0.2 # was 0.015 # -1.86 without prior, 0.015 with prior # Jordy: I found I needed to bump this up abit (approx 0.075+) to reduce false positives use_prior: true alpha0: 3.0 # 6 / 2 (6 pseudo observations) beta0: 0.03 # 0.01 * 6 / 2 (6 pseudo observations of variance 0.01) @@ -443,8 +443,8 @@ rand_seed: 1 # Planner Tuning Params - initial_samples: 350 - batch_samples: 200 + initial_samples: 300 + batch_samples: 150 pre_seed_resolution: 0.5 alpha: 0.5 q_max: 2.5 @@ -473,9 +473,9 @@ vehicle_model: "unicycle" # Cost 
Function Weights - pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 + pose_error_cov: [0.5, 0.5, 0.5, 10000.0, 10000.0, 10000.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 vel_error_cov: [1.0, 10.0] # was [0.1 2.0] # No longer using this cost term - acc_error_cov: [2.0, 2.0] + acc_error_cov: [1.0, 1.0] kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] #backup diff --git a/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js b/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js index 618c05978..fd4c1754e 100644 --- a/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js +++ b/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js @@ -214,7 +214,8 @@ class GraphMap extends React.Component { <> {/* Leaflet map container with initial center set to UTIAS (only for initialization) */} TODO We should make this set dynamically from the yaml config*/ + + center={[43.78220, -79.4661]} /* Jordy Modification For PETAWAWA center={[45.8983, -77.2829]} => TODO We should make this set dynamically from the yaml config*/ zoom={18} zoomControl={false} whenCreated={this.mapCreatedCallback.bind(this)} diff --git a/main/src/vtr_lidar/src/path_planning/mpc.cpp b/main/src/vtr_lidar/src/path_planning/mpc.cpp index a56174424..18484f270 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc.cpp @@ -156,11 +156,11 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 // Experimental velocity set-point constraint (instead of non zero velocity penalty) // Only add this cost term if we are not in point stabilization mode (end of path) - if (point_stabilization == false) - { - const auto vel_cost_term = 
steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, sharedLossFunc); - opt_problem.addCostTerm(vel_cost_term); - } + //if (point_stabilization == false) + //{ + // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, sharedLossFunc); + // opt_problem.addCostTerm(vel_cost_term); + //} // Experimental acceleration limits diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index e9e7f550b..36b6d1bdd 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -291,6 +291,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo if (chain.isLocalized() == 0) { // If we ever become unlocalized, I think we just need to break, then set a flag to exit the outter loop + // This also triggers after we reach end of path and effectively shuts down the planner bool localization_flag = false; CLOG(ERROR, "path_planning.cbit_planner") << "Localization was Lost, Exiting Inner Planning Loop"; break; @@ -327,9 +328,11 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo else { robot_pose= T2xyzrpy(T_w_p * T_p_r); - //CLOG(INFO, "path_planning.cbit_planner") << "Robot Pose: x: " << std::get<0>(robot_pose) << " y: " - //<< std::get<1>(robot_pose) << " z: " << std::get<2>(robot_pose) << " roll: " << std::get<3>(robot_pose) << " pitch: " - //<< std::get<4>(robot_pose) << " yaw: " << std::get<5>(robot_pose); + + // experimental, finding the direction the robot is facing (planing forward or in reverse) + //debug + //auto test_pose = T2xyzrpy(T_p_r); + //CLOG(ERROR, "path_planning.cbit_planner") << "Robot Pose in planing frame yaw: " << std::get<5>(test_pose); } @@ -704,11 
+707,11 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // TODO: Dynamic Expansion radius selection: // Its debatable whether this works well or not, but going to try it - if (p_goal->g_T != INFINITY) - { - conf.initial_exp_rad = exp_radius((tree.V.size() + samples.size()), sample_box_height, sample_box_width, conf.eta); - CLOG(DEBUG, "path_planning.cbit_planner") << "New expansion radius is: " << conf.initial_exp_rad; - } + //if (p_goal->g_T != INFINITY) + //{ + // conf.initial_exp_rad = exp_radius((tree.V.size() + samples.size()), sample_box_height, sample_box_width, conf.eta); + // CLOG(DEBUG, "path_planning.cbit_planner") << "New expansion radius is: " << conf.initial_exp_rad; + //} } @@ -842,7 +845,14 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo } // End of main planning for loop - + // Exiting cleanly: + // If we make it here and are no longer localized, we've reached end of path and should clear the bitstar path from memory + if (localization_flag == false) + { + CLOG(ERROR, "path_planning.cbit_planner") << "Reached End of Plan, Exiting cleanly"; + (*cbit_path_ptr).clear(); + + } // FAULT AND EARLY MAIN LOOP EXIT HANDLING @@ -1219,7 +1229,10 @@ std::shared_ptr CBITPlanner::UpdateState() int q_sign = sgn((B.p - A.p) * (new_state->y - A.q) - ((B.q - A.q) * (new_state->x - A.p))); q_min = q_min * q_sign; - //std::cout << "q_min is: " << q_min << std::endl; + //std::cout << "q_min is: " << q_min << std::endl; // debug; + + // Note I think we also need to take into account the direction the robot is facing on the path for reverse planning too + // Once we have the closest point on the path, it may not actually be the correct p-value because of singularities in the euclid to curv conversion // We need to use this points p-value as a starting point, then search p, qmin space in either direction discretely and find the point with diff --git 
a/main/src/vtr_path_planning/src/mpc/mpc_path_planner.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner.cpp index 561006f2b..7945fa2dd 100644 --- a/main/src/vtr_path_planning/src/mpc/mpc_path_planner.cpp +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner.cpp @@ -155,11 +155,11 @@ struct mpc_result SolveMPC(Eigen::Matrix previous_vel, lgmath::se3 // Experimental velocity set-point constraint (instead of non zero velocity penalty) // Only add this cost term if we are not in point stabilization mode (end of path) - if (point_stabilization == false) - { - const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, sharedLossFunc); - opt_problem.addCostTerm(vel_cost_term); - } + //if (point_stabilization == false) + //{ + // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, sharedLossFunc); + // opt_problem.addCostTerm(vel_cost_term); + //} // Experimental acceleration limits diff --git a/main/src/vtr_tactic/src/tactic.cpp b/main/src/vtr_tactic/src/tactic.cpp index de41dd65a..5b0f1b66c 100644 --- a/main/src/vtr_tactic/src/tactic.cpp +++ b/main/src/vtr_tactic/src/tactic.cpp @@ -163,10 +163,23 @@ bool Tactic::passedSeqId(const uint64_t& sid) const { bool Tactic::routeCompleted() const { auto lock = chain_->guard(); - if (chain_->trunkSequenceId() < (chain_->sequence().size() - 1)) return false; const auto translation = chain_->T_leaf_trunk().r_ab_inb().norm(); + //const auto test = (chain_->pose(chain_->size()-1)).r_ab_inb().norm(); + //CLOG(ERROR, "tactic") << "My test pose error is: " << test; + //if (test < config_->route_completion_translation_threshold) // experimental jordy modification + //{ + // CLOG(ERROR, "tactic") << "Route Completetion Threshold is: " << translation; + // CLOG(ERROR, "tactic") << "My test pose error 
is: " << test; + // return true; + //} + if (chain_->trunkSequenceId() < (chain_->sequence().size() - 1)) + { + return false; + } if (translation > config_->route_completion_translation_threshold) + { return false; + } return true; } From ca127c5c239b8fb8816e21a66425165f0f9b6315 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Mon, 23 Jan 2023 11:22:42 -0500 Subject: [PATCH 09/25] minor config changes --- config/honeycomb_grizzly_default.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 5fd21c6f5..8545f4feb 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -296,7 +296,7 @@ detection_range: 8.0 search_radius: 0.25 - negprob_threshold: 0.2 # was 0.015 # -1.86 without prior, 0.015 with prior # Jordy: I found I needed to bump this up abit (approx 0.075+) to reduce false positives + negprob_threshold: 0.1 # was 0.015 # -1.86 without prior, 0.015 with prior # Jordy: I found I needed to bump this up abit (approx 0.075+) to reduce false positives use_prior: true alpha0: 3.0 # 6 / 2 (6 pseudo observations) beta0: 0.03 # 0.01 * 6 / 2 (6 pseudo observations of variance 0.01) From 05dc487966d28e38ae82101800d137c14bbaddda Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Tue, 24 Jan 2023 11:45:35 -0500 Subject: [PATCH 10/25] Added new module for ouster lidar --- main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp b/main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp index b5a9db8c7..9368fa3a7 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp @@ -23,6 +23,7 @@ #include "vtr_lidar/modules/preprocessing/conversions/honeycomb_conversion_module_v2.hpp" #include "vtr_lidar/modules/preprocessing/conversions/velodyne_conversion_module.hpp" 
#include "vtr_lidar/modules/preprocessing/conversions/velodyne_conversion_module_v2.hpp" +#include "vtr_lidar/modules/preprocessing/conversions/ouster_conversion_module.hpp" #include "vtr_lidar/modules/preprocessing/preprocessing_module.hpp" #include "vtr_lidar/modules/preprocessing/preprocessing_module_v2.hpp" From 73b7a08fb7cfb2a81d4375db1378bbc768be2c6d Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Mon, 20 Feb 2023 15:38:02 -0500 Subject: [PATCH 11/25] Switched planner to multimaps, implemented bi-directional repeats --- config/honeycomb_grizzly_default.yaml | 14 +- .../cbit/cbit_path_planner.hpp | 8 +- .../include/vtr_path_planning/cbit/utils.hpp | 3 + main/src/vtr_path_planning/src/cbit/cbit.cpp | 27 ++- .../src/cbit/cbit_path_planner.cpp | 202 +++++++++++++----- rviz/honeycomb.rviz | 16 +- 6 files changed, 195 insertions(+), 75 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 8545f4feb..dd1a23a2b 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -11,7 +11,7 @@ #"navigation.graph_map_server", #"navigation.command", # tactic - # "tactic", + #"tactic", # "tactic.pipeline", # "tactic.module", # "tactic.module.live_mem_manager", @@ -199,7 +199,7 @@ vertex_test: type: lidar.vertex_test - max_translation: 0.3 + max_translation: 1.0 # was 0.3 max_rotation: 10.0 intra_exp_merging: @@ -296,7 +296,7 @@ detection_range: 8.0 search_radius: 0.25 - negprob_threshold: 0.1 # was 0.015 # -1.86 without prior, 0.015 with prior # Jordy: I found I needed to bump this up abit (approx 0.075+) to reduce false positives + negprob_threshold: 0.25 # was 0.015 # -1.86 without prior, 0.015 with prior # Jordy: I found I needed to bump this up abit (approx 0.075+) to reduce false positives use_prior: true alpha0: 3.0 # 6 / 2 (6 pseudo observations) beta0: 0.03 # 0.01 * 6 / 2 (6 pseudo observations of variance 0.01) @@ -408,7 +408,7 @@ ############ path planning configuration 
############ path_planning: - type: cbit.lidar # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version + type: cbit # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version control_period: 50 # ms teb: # vtr specific @@ -444,7 +444,7 @@ # Planner Tuning Params initial_samples: 300 - batch_samples: 150 + batch_samples: 100 pre_seed_resolution: 0.5 alpha: 0.5 q_max: 2.5 @@ -467,8 +467,8 @@ # Controller Params horizon_steps: 20 horizon_step_size: 0.3 - forward_vel: 0.75 - max_lin_vel: 1.25 + forward_vel: 1.25 + max_lin_vel: 1.75 max_ang_vel: 0.75 vehicle_model: "unicycle" diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index ce34f6674..e1e84d5ea 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -78,7 +78,7 @@ class CBITPlanner { // Costmap pointer std::shared_ptr cbit_costmap_ptr; - CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr); + CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction); protected: struct ChainInfo { @@ -94,10 +94,10 @@ class CBITPlanner { private: void InitializePlanningSpace(); - void Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr); + void Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double 
path_direction); void ResetPlanner(); - void HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr); - std::shared_ptr UpdateState(); + void HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction); + std::shared_ptr UpdateState(double path_direction); std::vector> SampleBox(int m); std::vector> SampleFreeSpace(int m); double BestVertexQueueValue(); diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp index 9c62b1c42..e46d259ff 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp @@ -24,6 +24,7 @@ #include #include #include +#include #pragma once @@ -92,7 +93,9 @@ class Tree { std::vector, std::shared_ptr>> E; std::vector, std::shared_ptr>> E_Old; std::vector> QV; // using shared pointers + std::multimap> QV2; std::vector, std::shared_ptr>> QE; + std::multimap, std::shared_ptr>> QE2; Tree() = default; }; diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 2a6fd0589..a39563994 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -228,6 +228,31 @@ void CBIT::initializeRoute(RobotState& robot_state) { euclid_path_vec.push_back(se3_pose); } + + // experimental, trying to determine sign for path following direction + // Using two consecutive poses on the path, we need to try to determine which direction the repeat is going: + const auto chain_info = getChainInfo(robot_state); + auto [stamp, w_p_r_in_r, T_p_r, T_w_p, T_w_v_odo, T_r_v_odo, curr_sid] = chain_info; + auto world_frame_pose = T2xyzrpy(T_w_p * T_p_r); + auto test1 = euclid_path_vec[0]; + auto test2 = euclid_path_vec[1]; + auto 
path_yaw = std::atan2((test2.y-test1.y),(test2.x-test1.y)); + auto pose_graph_yaw = std::get<5>(world_frame_pose); + CLOG(INFO, "path_planning.cbit") << "The path_yaw is: " << path_yaw; + CLOG(INFO, "path_planning.cbit") << "The pose_graph yaw is: " << pose_graph_yaw; + // Logic for determining the forward/reverse sign: + double path_direction; //1.0 = forward planning, -1.0 = reverse planning + if (abs((abs(path_yaw) - abs(pose_graph_yaw))) > 1.57075) + { + path_direction = -1.0; + } + else + { + path_direction = 1.0; + } + CLOG(INFO, "path_planning.cbit") << "The path repeat direction is:" << path_direction; + + CLOG(INFO, "path_planning.cbit") << "Trying to create global path"; // Create the path class object (Path preprocessing) CBITPath global_path(cbit_config, euclid_path_vec); @@ -244,7 +269,7 @@ void CBIT::initializeRoute(RobotState& robot_state) { // Instantiate the planner - CBITPlanner cbit(cbit_config, global_path_ptr, robot_state, cbit_path_ptr, costmap_ptr, corridor_ptr); + CBITPlanner cbit(cbit_config, global_path_ptr, robot_state, cbit_path_ptr, costmap_ptr, corridor_ptr, path_direction); CLOG(INFO, "path_planning.cbit") << "Planner successfully created and resolved"; } diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index 36b6d1bdd..fc78f2648 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -44,7 +44,7 @@ auto CBITPlanner::getChainInfo(vtr::path_planning::BasePathPlanner::RobotState& } // Class Constructor: -CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr) +CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, 
std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction) { // Setting random seed @@ -78,7 +78,7 @@ CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, //initialize_plot(); InitializePlanningSpace(); - Planning(robot_state, costmap_ptr, corridor_ptr); + Planning(robot_state, costmap_ptr, corridor_ptr, path_direction); // DEBUG CODE, ROBOT STATE UPDATE QUERY EXAMPLE /* @@ -114,8 +114,8 @@ void CBITPlanner::InitializePlanningSpace() tree.V.reserve(10000); tree.V_Old.reserve(10000); tree.E.reserve(10000); - tree.QV.reserve(10000); - tree.QE.reserve(10000); + //tree.QV.reserve(10000); + //tree.QE.reserve(10000); // Set initial cost to comes p_start->g_T = 0.0; @@ -142,7 +142,7 @@ void CBITPlanner::InitializePlanningSpace() } // Reset fuction which goes through the entire reset procedure (including calling ResetPlanner and restarting the planner itself) -void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr) +void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction) { // I think we may also want to add a small time delay just so we arent repeatedly spamming an optimal solution CLOG(ERROR, "path_planning.cbit_planner") << "Plan could not be improved, Initiating Reset"; @@ -180,7 +180,7 @@ void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& rob } // Perform a state update to convert the actual robot position to its corresponding pq space: - p_goal = UpdateState(); + p_goal = UpdateState(path_direction); p_goal_backup = p_goal; @@ -205,7 +205,7 @@ void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& rob CLOG(INFO, "path_planning.cbit_planner") << "The p,q coordinate of the robots goal is now: p: " << p_goal->p << " q: " << p_goal->q; CLOG(INFO, "path_planning.cbit_planner") << 
"The p,q coordinate of the robots start is now: p: " << p_start->p << " q: " << p_start->q << " g_T: " << p_start->g_T; - Planning(robot_state, costmap_ptr, corridor_ptr); + Planning(robot_state, costmap_ptr, corridor_ptr, path_direction); } // If we ever exit the planner due to a fault, we will do a hard reset, everything but the current robot_state (p_goal) and the inputs will be reinitialized @@ -213,8 +213,10 @@ void CBITPlanner::ResetPlanner() { tree.V.clear(); tree.E.clear(); - tree.QV.clear(); - tree.QE.clear(); + //tree.QV.clear(); + tree.QV2.clear(); + //tree.QE.clear(); + tree.QE2.clear(); tree.V_Repair_Backup.clear(); tree.V_Old.clear(); samples.clear(); @@ -237,7 +239,7 @@ void CBITPlanner::ResetPlanner() // Main planning function -void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr) +void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction) { // find some better places to initialize these double prev_path_cost = INFINITY; // experimental @@ -346,7 +348,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo new_state = std::make_unique (se3_robot_pose); // Perform a state update to convert the actual robot position to its corresponding pq space: - p_goal = UpdateState(); + p_goal = UpdateState(path_direction); //TODO: It could be useful to convert this p_goal back to euclid and compare with se3_robot_pose to verify the conversion worked properly (error should be very low) // If its not, we probably need to handle it or return an error @@ -401,8 +403,10 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo //} // Find vertices in the tree which are close to the new state, then populate the vertex queue with only these values. 
- tree.QV.clear(); - tree.QE.clear(); + //tree.QV.clear(); + tree.QV2.clear(); + //tree.QE.clear(); + tree.QE2.clear(); // EXPERIMENTAL, only take the closest point in the tree and use this to connect to the new robot state /* @@ -430,7 +434,8 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo sample_dist = calc_dist(*(tree.V[i]), *p_goal); if ((sample_dist <= 2.0) && ((tree.V[i])->p > (p_goal->p + (conf.initial_exp_rad/2)))) // TODO: replace magic number with a param, represents radius to search for state update rewires { - tree.QV.push_back(tree.V[i]); // comment out when only taking closest point in the tree to consider + //tree.QV.push_back(tree.V[i]); // comment out when only taking closest point in the tree to consider + tree.QV2.insert(std::pair>((tree.V[i]->g_T_weighted + h_estimated_admissible(*tree.V[i], *p_goal)), tree.V[i])); } } @@ -439,7 +444,8 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo CLOG(DEBUG, "path_planning.cbit_planner") << "Robot State Updated Successfully, p: " << p_goal->p << " q: " << p_goal->q; //CLOG(DEBUG, "path_planning.cbit_planner") << "Nearest Vertex, p: " << tree.V[closest_sample_ind]->p << " q: " << tree.V[closest_sample_ind]->q; - CLOG(DEBUG, "path_planning.cbit_planner") << "QV size: " << tree.QV.size(); + //CLOG(DEBUG, "path_planning.cbit_planner") << "QV size: " << tree.QV.size(); + CLOG(DEBUG, "path_planning.cbit_planner") << "QV size: " << tree.QV2.size(); // When the planner resumes, this will cause it to immediately try to rewire locally to the new robot state in a short amount of time @@ -448,7 +454,8 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo int m; - if (tree.QV.size() == 0 && tree.QE.size() == 0) + //if (tree.QV.size() == 0 && tree.QE.size() == 0) + if (tree.QV2.size() == 0 && tree.QE2.size() == 0) { //std::cout << "New Batch:" << std::endl; if (k == 0) @@ -692,17 +699,20 @@ void 
CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // if we have no 1st batch solution (we are in the first iteration or have just reset), add the whole tree to the queue if (k == 0) { - tree.QV.push_back(tree.V[i]); + //tree.QV.push_back(tree.V[i]); + tree.QV2.insert(std::pair>((tree.V[i]->g_T_weighted + h_estimated_admissible(*tree.V[i], *p_goal)), tree.V[i])); } // Otherwise, only add the portions of the tree within the sliding window to avoid processing preseeded vertices which are already optimal //else if (((tree.V[i]->p) <= p_goal->p + dynamic_window_width + (conf.sliding_window_freespace_padding*2)) && ((vertex_rej_prob / random_integer) >= 1.0)) else if (((vertex_rej_prob / random_integer) >= 1.0)) // for some reason using the lookahead queue doesnt work reliably for collisions, not sure why, need to investigate { - tree.QV.push_back(tree.V[i]); + //tree.QV.push_back(tree.V[i]); + tree.QV2.insert(std::pair>((tree.V[i]->g_T_weighted + h_estimated_admissible(*tree.V[i], *p_goal)), tree.V[i])); } } - CLOG(DEBUG, "path_planning.cbit_planner") << "QV size after batch: " << tree.QV.size(); + //CLOG(DEBUG, "path_planning.cbit_planner") << "QV size after batch: " << tree.QV.size(); + CLOG(DEBUG, "path_planning.cbit_planner") << "QV size after batch: " << tree.QV2.size(); // TODO: Dynamic Expansion radius selection: // Its debatable whether this works well or not, but going to try it @@ -720,6 +730,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Planning loop starts here // I think there is some gains to be had here, we actually iterate through the vertex queue twice finding minima, // I think we probably could combine the 2 vertex functions into 1 cutting compute in half (we can test to see if this is a big gain or not) + while (BestVertexQueueValue() < BestEdgeQueueValue()) { //auto test = BestInVertexQueue(); @@ -778,6 +789,13 @@ void 
CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo upper_edge_index = upper_edge_index - 1; } } + + // Set cost to comes + xm->g_T_weighted = vm->g_T_weighted + weighted_cost; + xm->g_T = vm->g_T + actual_cost; + xm->parent = vm; + // TODO: wormhole handling + } // Otherwise we can remove xm from the samples, add xm to the tree vertices and queue @@ -796,20 +814,22 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo break; // Once we find the one to delete, it is safe to break, there should only every be one } } + // Set cost to comes + xm->g_T_weighted = vm->g_T_weighted + weighted_cost; + xm->g_T = vm->g_T + actual_cost; + xm->parent = vm; + // TODO: wormhole handling + tree.V.push_back(xm); - tree.QV.push_back(xm); + //tree.QV.push_back(xm); + tree.QV2.insert(std::pair>(xm->g_T_weighted + h_estimated_admissible(*xm, *p_goal), xm)); } - // Set cost to comes - xm->g_T_weighted = vm->g_T_weighted + weighted_cost; - xm->g_T = vm->g_T + actual_cost; - xm->parent = vm; - // TODO: wormhole handling // Generate edge, create parent chain tree.E.push_back(std::tuple, std::shared_ptr> {vm, xm}); - + /* // Filter edge queue as now any other edge with worse cost heuristic can be ignored int upper_queue_index = tree.QE.size(); for (int i = 0; i < upper_queue_index; i++) @@ -829,14 +849,41 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo upper_queue_index = upper_queue_index - 1; } } + */ + + + // Variation using multimap edge queue. 
In this case we kind of need to do the same procedure unfortunately (slow) + // Only here we have to use iterators to go through the queue + // NOTE THIS NEEDS SOME VERY GOOD DEBUGGING BEFORE TRUSTING THIS CODE, NOT SURE IF ITERATORS BEHAVE THE SAME WAY AS INDEXING WITH DELETION + // For iterators, we need to do this process in a while loop because we cannot just decrement the index like before (leads to bad pointer) + std::multimap, std::shared_ptr>>::iterator itr = tree.QE2.begin(); + while (itr != tree.QE2.end()) + { + auto edge = itr->second; + Node v = *std::get<0>(edge); + std::shared_ptr x = std::get<1>(edge); + + if ((x == xm) && (v.g_T_weighted + calc_weighted_dist(v, *xm, conf.alpha) >= xm->g_T_weighted)) + { + // Once again, be very careful with the iteration indices when deleting + // Need to decrement both the iterator and the total size + itr = tree.QE2.erase(itr); // Erase returns the itr of the value in the map following the previous iterator + } + else + { + itr++; + } + } } } } // If there is no edges in the queue which can improve the current tree, clear the queues which will consequently trigger the end of a batch else { - tree.QV.clear(); - tree.QE.clear(); + //tree.QV.clear(); + tree.QV2.clear(); + //tree.QE.clear(); + tree.QE2.clear(); } // TODO: Plotting (if we want) @@ -897,7 +944,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo new_state = std::make_unique (se3_robot_pose); // Perform a state update to convert the actual robot position to its corresponding pq space: - p_goal = UpdateState(); + p_goal = UpdateState(path_direction); p_goal_backup = p_goal; @@ -922,7 +969,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo CLOG(INFO, "path_planning.cbit_planner") << "The p,q coordinate of the robots goal is now: p: " << p_goal->p << " q: " << p_goal->q; CLOG(INFO, "path_planning.cbit_planner") << "The p,q coordinate of the robots start is now: p: " << p_start->p << " q: " 
<< p_start->q << " g_T: " << p_start->g_T; - Planning(robot_state, costmap_ptr); + Planning(robot_state, costmap_ptr, path_direction); */ @@ -1041,10 +1088,14 @@ std::vector> CBITPlanner::SampleFreeSpace(int m) // TODO: Generating Pre-seeds if (repair_mode == false) { + // hardcoded pre-seed interval for now //int pre_seeds = abs(p_goal->p - p_start->p) / 0.25; int pre_seeds = abs(p_zero - p_start->p) / 0.25; // Note needed to change p_goal to p_zero. When the sliding window padding is large, pre-seeds wont get generated all the way to the goal + std::cout << "generating pre-seeds - number is:" << pre_seeds << std::endl; + std::cout << "The size of the tree is:" << tree.V.size() << std::endl; + // In the python version I do this line thing which is more robust, but for now im going to do this quick and dirty double p_step = 0.25; double p_val = p_zero; // needed to modify the starting i to be p_zero @@ -1160,7 +1211,7 @@ void CBITPlanner::Prune(double c_best, double c_best_weighted) // Function for updating the state of the robot, updates the goal and sliding window, and prunes the current tree // Note that this stage requires us to convert the state of the robot in euclidean space into a singular curvilinear space pt, which is actually slightly tricky // To do without singularities -std::shared_ptr CBITPlanner::UpdateState() +std::shared_ptr CBITPlanner::UpdateState(double path_direction) { //std::cout << "The new state is x: " << new_state->x << " y: " << new_state->y << std::endl; @@ -1232,6 +1283,8 @@ std::shared_ptr CBITPlanner::UpdateState() //std::cout << "q_min is: " << q_min << std::endl; // debug; // Note I think we also need to take into account the direction the robot is facing on the path for reverse planning too + q_min = q_min * q_sign * path_direction; + //std::cout << "q_min is: " << q_min << std::endl; // debug; // Once we have the closest point on the path, it may not actually be the correct p-value because of singularities in the euclid to curv 
conversion @@ -1276,11 +1329,16 @@ std::shared_ptr CBITPlanner::UpdateState() double CBITPlanner::BestVertexQueueValue() { - if (tree.QV.size() == 0) + //if (tree.QV.size() == 0) + //{ + // return INFINITY; + //} + if (tree.QV2.size() == 0) { return INFINITY; } // Iterate through all vertices in the queue, find the minimum cost heurstic + /* double min_vertex_cost = INFINITY; for (int i = 0; i < tree.QV.size(); i++) { @@ -1291,6 +1349,11 @@ double CBITPlanner::BestVertexQueueValue() min_vertex_cost = weighted_heuristic; } } + */ + + // New multimap implementation of this: + std::multimap>::iterator itr = tree.QV2.begin(); + double min_vertex_cost = itr -> first; return min_vertex_cost; } @@ -1299,11 +1362,15 @@ double CBITPlanner::BestVertexQueueValue() double CBITPlanner::BestEdgeQueueValue() { - if (tree.QE.size() == 0) + //if (tree.QE.size() == 0) + //{ + // return INFINITY; + //} + if (tree.QE2.size() == 0) { return INFINITY; } - + /* double min_edge_cost = INFINITY; for (int i = 0; i < tree.QE.size(); i++) { @@ -1320,8 +1387,12 @@ double CBITPlanner::BestEdgeQueueValue() { min_edge_cost = weighted_heuristic; } - } + */ + + // Using multimaps to accomplish the same thing: + std::multimap, std::shared_ptr>>::iterator itr = tree.QE2.begin(); + double min_edge_cost = itr -> first; return min_edge_cost; } @@ -1330,16 +1401,19 @@ double CBITPlanner::BestEdgeQueueValue() std::shared_ptr CBITPlanner::BestInVertexQueue() { - if (tree.QV.size() == 0) - { + //if (tree.QV.size() == 0) + //{ //std::cout << "Vertex Queue is Empty!" << std::endl; + //CLOG(INFO, "path_planning.cbit_planner") << "Vertex Queue is Empty, Something went Wrong!"; + //} + if (tree.QV2.size() == 0) + { CLOG(INFO, "path_planning.cbit_planner") << "Vertex Queue is Empty, Something went Wrong!"; - //return; // TODO: Need to fix these returns, I can potentially use all auto types to do something similar to what I did in python - // But I think longer term I should find a better solution. 
This case only occurs if a solution cannot be found, which for now can be ignored } - // Loop through the vertex queue, select the vertex node with the lowest cost to come + heuristic dist to goal + // Loop through the vertex queue, select the vertex node with the lowest cost to come + heuristic dist to goal + /* double min_vertex_cost = INFINITY; std::shared_ptr best_vertex; int best_vertex_ind; @@ -1358,6 +1432,14 @@ std::shared_ptr CBITPlanner::BestInVertexQueue() auto it = tree.QV.begin() + best_vertex_ind; *it = std::move(tree.QV.back()); tree.QV.pop_back(); + */ + + + // New multimap implementation of this: + std::multimap>::iterator itr = tree.QV2.begin(); + std::shared_ptr best_vertex = itr -> second; + double min_vertex_cost = itr -> first; + tree.QV2.erase(itr); return best_vertex; } @@ -1381,7 +1463,9 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) //tree.QE.push_back(std::tuple, std::shared_ptr> (v_copy, x_copy)); // direct method - tree.QE.push_back(std::tuple, std::shared_ptr> {(v), (samples[i])}); + //tree.QE.push_back(std::tuple, std::shared_ptr> {(v), (samples[i])}); + tree.QE2.insert(std::pair, std::shared_ptr>>((v->g_T_weighted + calc_weighted_dist(*v,*samples[i],conf.alpha) + h_estimated_admissible(*samples[i], *p_goal)), std::tuple, std::shared_ptr> {(v), (samples[i])})); + } } @@ -1414,7 +1498,8 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) // If all conditions satisfied, add the edge to the queue //tree.V[i]->g_T = INFINITY; // huh actually looking at this again, im not sure if I should be doing this (yeah I think was a mistake) //tree.V[i]->g_T_weighted = INFINITY; // huh actually looking at this again, im not sure if I should be doing this (yeah I think its a mistake) - tree.QE.push_back(std::tuple, std::shared_ptr> {(v), (tree.V[i])}); + //tree.QE.push_back(std::tuple, std::shared_ptr> {(v), (tree.V[i])}); + tree.QE2.insert(std::pair, std::shared_ptr>>((v->g_T_weighted + calc_weighted_dist(*v,*tree.V[i],conf.alpha) + 
h_estimated_admissible(*tree.V[i], *p_goal)), std::tuple, std::shared_ptr> {(v), (tree.V[i])})); } /* @@ -1425,6 +1510,7 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) //tree.V[i]->g_T = INFINITY; //I think these two lines were a mistake, should not be here //tree.V[i]->g_T_weighted = INFINITY; //I think these two lines were a mistake, should not be here tree.QE.push_back(std::tuple, std::shared_ptr> {(v), (tree.V[i])}); + //tree.QE2.insert(std::pair, std::shared_ptr>>((v->g_T_weighted + calc_weighted_dist(*v,*tree.V[i],conf.alpha) + h_estimated_admissible(*tree.V[i], *p_goal)), std::tuple, std::shared_ptr> {(v), (tree.V[i])})); */ } @@ -1434,22 +1520,18 @@ void CBITPlanner::ExpandVertex(std::shared_ptr v) std::tuple, std::shared_ptr> CBITPlanner::BestInEdgeQueue() { - if (tree.QE.size() == 0) // need to handle a case where the return path is 100% optimal in which case things get stuck and need ot be flagged to break + //if (tree.QE.size() == 0) // need to handle a case where the return path is 100% optimal in which case things get stuck and need ot be flagged to break + //{ + // CLOG(DEBUG, "path_planning.cbit_planner") << "Edge Queue is Empty, Solution Could Not be Improved This Batch"; + // return std::tuple, std::shared_ptr> {NULL, NULL}; + //} + if (tree.QE2.size() == 0) // need to handle a case where the return path is 100% optimal in which case things get stuck and need ot be flagged to break { - //std::cout << "Edge Queue is Empty! 
Optimal Solution Found" << std::endl; CLOG(DEBUG, "path_planning.cbit_planner") << "Edge Queue is Empty, Solution Could Not be Improved This Batch"; - //CLOG(WARNING, "path_planning.cbit_planner") << "Tree Size: " << tree.V.size() << " Vertex Queue Size: " << tree.QV.size() << " Sample Size: " <p << " q: " << tree.V[0]->q << " g_T_weighted: " << tree.V[0]->g_T_weighted; - - //CLOG(WARNING, "path_planning.cbit_planner") << "Repair mode is: " << repair_mode; - //CLOG(WARNING, "path_planning.cbit_planner") << "final sample is p: " << samples[samples.size()-1]->p; // note this can cause indexing errors in some cases - - return std::tuple, std::shared_ptr> {NULL, NULL}; - } - + /* // Loop through edge queue, find the edge with the smallest heuristic cost std::shared_ptr v; std::shared_ptr x; @@ -1473,6 +1555,14 @@ std::tuple, std::shared_ptr> CBITPlanner::BestInEdge *it = std::move(tree.QE.back()); tree.QE.pop_back(); return {v,x}; + */ + + // Equivalent code using multimaps: + std::multimap, std::shared_ptr>>::iterator itr = tree.QE2.begin(); + double min_edge_cost = itr -> first; + std::tuple, std::shared_ptr> edge_tuple = itr -> second; + tree.QE2.erase(itr); + return edge_tuple; } @@ -1718,8 +1808,10 @@ bool CBITPlanner::col_check_path() // (Might be a good idea to just make a function for this, it would probably come in handy) //reset queues - tree.QV.clear(); - tree.QE.clear(); + //tree.QV.clear(); + tree.QV2.clear(); + //tree.QE.clear(); + tree.QE2.clear(); // Then set a temporary goal to be the repair vertex, and signal the planner to enter repair mode p_goal_backup = p_goal; diff --git a/rviz/honeycomb.rviz b/rviz/honeycomb.rviz index 89519667a..bb49cc94b 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -20,7 +20,7 @@ Panels: - /planning curr map loc1 - /Corridor Path Left1/Topic1 Splitter Ratio: 0.4507042169570923 - Tree Height: 865 + Tree Height: 892 - Class: rviz_common/Selection Name: Selection - Class: rviz_common/Tool Properties @@ -1188,7 
+1188,7 @@ Visualization Manager: Class: rviz_default_plugins/Map Color Scheme: costmap Draw Behind: false - Enabled: false + Enabled: true Name: obstacle cost map Topic: Depth: 5 @@ -1204,7 +1204,7 @@ Visualization Manager: Reliability Policy: Reliable Value: /vtr/change_detection_costmap_updates Use Timestamp: false - Value: false + Value: true - Alpha: 0.699999988079071 Class: rviz_default_plugins/Map Color Scheme: raw @@ -1551,10 +1551,10 @@ Visualization Manager: Window Geometry: Displays: collapsed: false - Height: 1016 + Height: 1043 Hide Left Dock: false Hide Right Dock: true - QMainWindow State: 000000ff00000000fd0000000400000000000002810000039efc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000009d00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000007400fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000039e000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d0065000000000000000500000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000004b10000039e00000004
000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 000000ff00000000fd000000040000000000000281000003b9fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000009d00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000007400fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000003b9000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d0065000000000000000500000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000004f9000003b900000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -1563,6 +1563,6 @@ Window Geometry: collapsed: false Views: collapsed: true - Width: 1848 - X: 1992 - Y: 27 + Width: 1920 + X: 0 + Y: 0 From cb150f76e3808f4ddcfedce3dca6b40ade93b3ca Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Wed, 1 Mar 2023 08:05:50 -0500 Subject: [PATCH 12/25] merge bug fixes --- 
config/honeycomb_grizzly_default.yaml | 10 +-- launch/online_honeycomb_grizzly.launch.yaml | 1 + .../vtr-gui/src/components/graph/GraphMap.js | 2 +- .../preprocessing/preprocessing_module_v2.cpp | 2 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 1 + .../src/cbit/cbit_path_planner.cpp | 2 +- rviz/honeycomb.rviz | 73 +++++++++++++++++-- 7 files changed, 75 insertions(+), 16 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index dd1a23a2b..b7806f24b 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -438,7 +438,7 @@ sliding_window_width: 12.0 sliding_window_freespace_padding: 0.5 corridor_resolution: 0.05 - state_update_freq: 1.0 + state_update_freq: 2.0 update_state: true rand_seed: 1 @@ -446,7 +446,7 @@ initial_samples: 300 batch_samples: 100 pre_seed_resolution: 0.5 - alpha: 0.5 + alpha: 1.0 q_max: 2.5 frame_interval: 50 iter_max: 10000000 @@ -467,13 +467,13 @@ # Controller Params horizon_steps: 20 horizon_step_size: 0.3 - forward_vel: 1.25 - max_lin_vel: 1.75 + forward_vel: 1.00 + max_lin_vel: 1.55 max_ang_vel: 0.75 vehicle_model: "unicycle" # Cost Function Weights - pose_error_cov: [0.5, 0.5, 0.5, 10000.0, 10000.0, 10000.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 + pose_error_cov: [0.1, 0.1, 0.1, 10000.0, 10000.0, 10000.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 vel_error_cov: [1.0, 10.0] # was [0.1 2.0] # No longer using this cost term acc_error_cov: [1.0, 1.0] kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] diff --git a/launch/online_honeycomb_grizzly.launch.yaml b/launch/online_honeycomb_grizzly.launch.yaml index 24c9a1e3b..7b4f2380a 100644 --- a/launch/online_honeycomb_grizzly.launch.yaml +++ b/launch/online_honeycomb_grizzly.launch.yaml @@ -7,6 +7,7 @@ environment: # data_dir:=${VTRDATA}/online-test-lidar/$(date '+%F')/$(date 
'+%F') # data_dir:=${VTRDATA}/online-test-lidar/2022-09-08/2022-09-08 # data_dir:=${VTRDATA}/online-test-lidar/2023-01-18/2023-01-18 # Nice dome path + # data_dir:=${VTRDATA}/online-test-lidar/2023-02-20/2023-02-20 # Nice demo path from utias bay start_directory: ${VTRDATA} diff --git a/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js b/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js index fd4c1754e..7f3dc6e78 100644 --- a/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js +++ b/main/src/vtr_gui/vtr_gui/vtr-gui/src/components/graph/GraphMap.js @@ -224,7 +224,7 @@ class GraphMap extends React.Component { diff --git a/main/src/vtr_lidar/src/modules/preprocessing/preprocessing_module_v2.cpp b/main/src/vtr_lidar/src/modules/preprocessing/preprocessing_module_v2.cpp index 249e8fa94..f25505fb1 100644 --- a/main/src/vtr_lidar/src/modules/preprocessing/preprocessing_module_v2.cpp +++ b/main/src/vtr_lidar/src/modules/preprocessing/preprocessing_module_v2.cpp @@ -105,7 +105,7 @@ void PreprocessingModuleV2::run_(QueryCache &qdata0, OutputCache &, if (config_->visualize) { PointCloudMsg pc2_msg; pcl::toROSMsg(*filtered_point_cloud, pc2_msg); - pc2_msg.header.frame_id = "lidar"; + pc2_msg.header.frame_id = "os_sensor"; //"lidar" for honeycomb pc2_msg.header.stamp = rclcpp::Time(*qdata.stamp); filtered_pub_->publish(pc2_msg); } diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index a39563994..441b4b5ac 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -19,6 +19,7 @@ #include "vtr_path_planning/cbit/cbit.hpp" #include "vtr_path_planning/mpc/mpc_path_planner.hpp" +#include "vtr_path_planning/mpc/mpc_path_planner2.hpp" #include #include diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index fc78f2648..495074a72 100644 --- 
a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -1279,7 +1279,7 @@ std::shared_ptr CBITPlanner::UpdateState(double path_direction) int q_sign = sgn((B.p - A.p) * (new_state->y - A.q) - ((B.q - A.q) * (new_state->x - A.p))); - q_min = q_min * q_sign; + //q_min = q_min * q_sign; //std::cout << "q_min is: " << q_min << std::endl; // debug; // Note I think we also need to take into account the direction the robot is facing on the path for reverse planning too diff --git a/rviz/honeycomb.rviz b/rviz/honeycomb.rviz index bb49cc94b..b14ad2ee1 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -18,9 +18,10 @@ Panels: - /global path1/Offset1 - /planning change detection scan1 - /planning curr map loc1 + - /Path2 - /Corridor Path Left1/Topic1 Splitter Ratio: 0.4507042169570923 - Tree Height: 892 + Tree Height: 871 - Class: rviz_common/Selection Name: Selection - Class: rviz_common/Tool Properties @@ -72,16 +73,66 @@ Visualization Manager: Frame Timeout: 99999 Frames: All Enabled: false + base_footprint: + Value: false + base_footprint_bumper_part: + Value: false + base_link: + Value: false + chassis_link: + Value: false + front_axle_link: + Value: false + front_bumblebee: + Value: false + front_left_wheel_link: + Value: false + front_right_wheel_link: + Value: false + front_xb3: + Value: false + hc: + Value: false + honeycomb: + Value: false + imu_link: + Value: false + left_lidar: + Value: false lidar: Value: false loc vertex frame: Value: false loc vertex frame (offset): Value: false + m600_base: + Value: false + microstrain: + Value: false + navsat_link: + Value: false odo vertex frame: Value: false + odom: + Value: false planning frame: Value: false + rear_antenna_link: + Value: false + rear_bumblebee: + Value: false + rear_left_wheel_link: + Value: false + rear_right_wheel_link: + Value: false + rear_sensor_base_link: + Value: false + rear_sensor_plate_link: + Value: false + 
rear_xb3: + Value: false + right_lidar: + Value: false robot: Value: true robot planning: @@ -89,6 +140,10 @@ Visualization Manager: robot planning (extrapolated): Value: false robot planning (extrapolated) mpc: + Value: true + sensor_anchor_link: + Value: false + velodyne: Value: false world: Value: false @@ -101,6 +156,8 @@ Visualization Manager: Show Names: true Tree: world: + hc: + {} loc vertex frame: {} odo vertex frame: @@ -1160,7 +1217,7 @@ Visualization Manager: Buffer Length: 1 Class: rviz_default_plugins/Path Color: 0; 0; 0 - Enabled: true + Enabled: false Head Diameter: 0.30000001192092896 Head Length: 0.20000000298023224 Length: 0.30000001192092896 @@ -1183,7 +1240,7 @@ Visualization Manager: History Policy: Keep Last Reliability Policy: Reliable Value: /vtr/robot_path - Value: true + Value: false - Alpha: 0.699999988079071 Class: rviz_default_plugins/Map Color Scheme: costmap @@ -1551,10 +1608,10 @@ Visualization Manager: Window Geometry: Displays: collapsed: false - Height: 1043 + Height: 1016 Hide Left Dock: false Hide Right Dock: true - QMainWindow State: 
000000ff00000000fd000000040000000000000281000003b9fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000009d00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000007400fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000003b9000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d0065000000000000000500000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000004f9000003b900000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 
000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000004b1000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -1563,6 +1620,6 @@ Window Geometry: collapsed: false Views: collapsed: true - Width: 1920 - X: 0 - Y: 0 + Width: 1848 + X: 72 + Y: 27 From 9a7c4200f65a799b8e1fc63ddbe45d3d238f61a5 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Wed, 8 Mar 2023 09:11:29 -0500 Subject: [PATCH 13/25] Implemented Fixes to Barrier Method Corridor Controller --- config/honeycomb_grizzly_default.yaml | 1 + .../path_planning/mpc_path_planner2.hpp | 49 ++ 
main/src/vtr_lidar/src/path_planning/cbit.cpp | 11 +- .../src/path_planning/mpc_path_planner2.cpp | 561 +++++++++++++++++ .../include/vtr_path_planning/cbit/cbit.hpp | 1 + .../mpc/custom_loss_functions.hpp | 31 + .../mpc/custom_steam_evaluators.hpp | 101 ++++ .../mpc/mpc_path_planner2.hpp | 49 ++ .../mpc/scalar_log_barrier_evaluator.hpp | 102 ++++ .../vtr_path_planning/path_planning.hpp | 4 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 15 +- .../src/mpc/lateral_error_evaluator.cpp | 104 ++++ .../src/mpc/mpc_path_planner2.cpp | 572 ++++++++++++++++++ 13 files changed, 1591 insertions(+), 10 deletions(-) create mode 100644 main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp create mode 100644 main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp create mode 100644 main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_loss_functions.hpp create mode 100644 main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp create mode 100644 main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp create mode 100644 main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp create mode 100644 main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp create mode 100644 main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index b7806f24b..85b2006af 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -477,6 +477,7 @@ vel_error_cov: [1.0, 10.0] # was [0.1 2.0] # No longer using this cost term acc_error_cov: [1.0, 1.0] kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] + lat_error_cov: [30.0] #backup #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 diff --git 
a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp new file mode 100644 index 000000000..a4758b6c2 --- /dev/null +++ b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp @@ -0,0 +1,49 @@ +// Copyright 2021, Autonomous Space Robotics Lab (ASRL) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * \file mpc.hpp + * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) + */ +#include "vtr_path_planning/cbit/cbit.hpp" +#include "steam.hpp" + +#pragma once + +// Define output structure for returning the optimization result +struct mpc_result +{ + Eigen::Matrix applied_vel; + std::vector mpc_poses; +}; + +struct meas_result +{ + std::vector measurements; + bool point_stabilization; +}; + +// Declaring helper functions + +// Primary optimization function: Takes in the input configurations and the extrapolated robot pose, outputs a vector for the velocity to apply and the predicted horizon +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization); + +// Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity +struct 
meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); + +// Helper function for post-processing and saturating the velocity command +Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim); + +// Helper function in Generate Reference Meas which interpolates a Transformation measurement gen the cbit_path and the desired measurements p value along the path +lgmath::se3::Transformation InterpolateMeas2(double p_meas, std::vector cbit_p, std::vector cbit_path); \ No newline at end of file diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index 424abf0ae..7a1d55538 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -17,7 +17,7 @@ * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) */ #include "vtr_lidar/path_planning/cbit.hpp" -#include "vtr_lidar/path_planning/mpc.hpp" +#include "vtr_lidar/path_planning/mpc_path_planner2.hpp" #include "vtr_lidar/cache.hpp" @@ -89,6 +89,9 @@ auto LidarCBIT::Config::fromROS(const rclcpp::Node::SharedPtr& node, const std:: const auto kin_error_diag = node->declare_parameter>(prefix + ".mpc.kin_error_cov", std::vector()); config->kin_error_cov.diagonal() << kin_error_diag[0], kin_error_diag[1], kin_error_diag[2], kin_error_diag[3], kin_error_diag[4], kin_error_diag[5]; + + const auto lat_error_diag = node->declare_parameter>(prefix + ".mpc.lat_error_cov", std::vector()); + config->lat_error_cov.diagonal() << lat_error_diag[0]; // MISC config->command_history_length = node->declare_parameter(prefix + ".mpc.command_history_length", config->command_history_length); @@ -330,7 +333,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // Calculate which T_ref measurements to used based on the current path solution CLOG(INFO, "mpc.cbit") << "Attempting to generate T_ref measurements"; - auto meas_result = 
GenerateReferenceMeas(cbit_path_ptr, robot_pose, K, DT, VF); + auto meas_result = GenerateReferenceMeas2(cbit_path_ptr, robot_pose, K, DT, VF); auto measurements = meas_result.measurements; bool point_stabilization = meas_result.point_stabilization; @@ -340,7 +343,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; - auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); + auto mpc_result = SolveMPC2(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here mpc_poses = mpc_result.mpc_poses; CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; @@ -357,7 +360,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // If required, saturate the output velocity commands based on the configuration limits CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; - Eigen::Matrix saturated_vel = SaturateVel(applied_vel, config_->max_lin_vel, config_->max_ang_vel); + Eigen::Matrix saturated_vel = SaturateVel2(applied_vel, config_->max_lin_vel, config_->max_ang_vel); // Store the result in memory so we can use previous state values to re-initialize and extrapolate the robot pose in subsequent iterations vel_history.erase(vel_history.begin()); diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp new file mode 100644 index 000000000..ef567980c --- /dev/null +++ b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -0,0 +1,561 @@ +// Copyright 2021, Autonomous Space Robotics Lab (ASRL) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance 
with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * \file mpc.cpp + * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) + */ +#include "vtr_path_planning/mpc/mpc_path_planner2.hpp" +#include "vtr_path_planning/mpc/custom_steam_evaluators.hpp" +#include "vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp" +#include "vtr_lidar/cache.hpp" + +// Updated version using corridor constrained MPC + +// This file is used to generate a tracking mpc output velocity command given a discretized path to follow and optimization parameters +// It is used in cbit.cpp in both the vtr_lidar package (obstacle avoidance) and vtr_path_planning packages (obstacle free) in the computeCommand function + + +// Main MPC problem solve function +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization) +{ + + // Conduct an MPC Iteration given current configurations + + // Velocity set-points (desired forward velocity and angular velocity), here we set a static forward target velocity, and try to minimize rotations (0rad/sec) + Eigen::Matrix v_ref; + v_ref << VF, + 0; + + + // Kinematic projection Matrix for Unicycle Model (note its -1's because our varpi lie algebra vector is of a weird frame) + // TODO, make the choice of projection matrix a configurable param for the desired vehicle model. 
+ Eigen::Matrix P_tran; + P_tran << -1, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, -1; + + // Lateral constraint projection matrices (Experimental) + Eigen::Matrix I_4; // In steam, the homopoint vars automatically add the 4th row 1, so representing I_4 just needs the 3 zeros + I_4 << 0, + 0, + 0; + //Eigen::Matrix I_2_tran; + //I_2_tran << 0, 1, 0, 0; + + + // Setup shared loss functions and noise models for all cost terms + const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); + + const auto sharedPoseNoiseModel = + steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); + + const auto sharedVelNoiseModel = + steam::StaticNoiseModel<2>::MakeShared(vel_noise_vect); + + const auto sharedAccelNoiseModel = + steam::StaticNoiseModel<2>::MakeShared(accel_noise_vect); + + const auto sharedKinNoiseModel = + steam::StaticNoiseModel<6>::MakeShared(kin_noise_vect); + + // Experimental lat constraint (hardcoded for now) + Eigen::Matrix lat_noise_vect; + lat_noise_vect << 100.0; + const auto sharedLatNoiseModel = + steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); + + + + // Generate STEAM States for the velocity vector and SE3 state transforms + std::vector pose_states; + std::vector> vel_states; + + // Invert the extrapolated robot state and use this as the state initialization + lgmath::se3::Transformation T0_inv = T0.inverse(); + Eigen::Vector2d v0(0.0, 0.0); + + // Pushback the initial states (current robot state) + pose_states.push_back(T0_inv); // Change this to T0 when implementing on robot, T0_1 for debug + //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() + vel_states.push_back(v0); + + // Set the remaining states + for (int i=0; i pose_state_vars; + std::vector::Ptr> vel_state_vars; + std::vector measurement_vars; // This one is for storing measurements as locked evaluators for barrier constraints + // Create a locked state var for the 4th column of the identity matrix (used in 
state constraint) + steam::stereo::HomoPointStateVar::Ptr I_4_eval = steam::stereo::HomoPointStateVar::MakeShared(I_4); // For some reason I_4 needs to be 3x1, it cant handle 4x1's? + I_4_eval->locked() = true; + + for (int i = 0; i < K; i++) + { + pose_state_vars.push_back(steam::se3::SE3StateVar::MakeShared(pose_states[i])); + vel_state_vars.push_back(steam::vspace::VSpaceStateVar<2>::MakeShared(vel_states[i])); + } + + // Lock the first (current robot) state from being able to be modified during the optimization + pose_state_vars[0]->locked() = true; + + + // Setup the optimization problem + steam::OptimizationProblem opt_problem; + for (int i=0; i::MakeShared(pose_error_func, sharedPoseNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(pose_cost_term); + + // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) + //const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, sharedLossFunc); + //opt_problem.addCostTerm(vel_cost_term); + + + + // Kinematic constraints (softened but penalized heavily) + if (i < (K-1)) + { + const auto lhs = steam::se3::ComposeInverseEvaluator::MakeShared(pose_state_vars[i+1], pose_state_vars[i]); + const auto vel_proj = steam::vspace::MatrixMultEvaluator<6,2>::MakeShared(vel_state_vars[i], P_tran); // TODO, I guess this version of steam doesnt have this one, will need to do it myself + const auto scaled_vel_proj = steam::vspace::ScalarMultEvaluator<6>::MakeShared(vel_proj, DT); + const auto rhs = steam::se3::ExpMapEvaluator::MakeShared(scaled_vel_proj); + const auto kin_error_func = steam::se3::LogMapEvaluator::MakeShared(steam::se3::ComposeInverseEvaluator::MakeShared(lhs, rhs)); + const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(kin_cost_term); + + + // Experimental velocity set-point constraint (instead 
of non zero velocity penalty) + // Only add this cost term if we are not in point stabilization mode (end of path) + //if (point_stabilization == false) + //{ + // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, sharedLossFunc); + // opt_problem.addCostTerm(vel_cost_term); + //} + + + // Experimental acceleration Constraints + + if (i == 0) + { + // On the first iteration, we need to use an error with the previously applied control command state + const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i], previous_vel), sharedAccelNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(accel_cost_term); + } + else + { + // Subsequent iterations we make an error between consecutive velocities. We penalize large changes in velocity between time steps + const auto accel_diff = steam::vspace::AdditionEvaluator<2>::MakeShared(vel_state_vars[i], steam::vspace::NegationEvaluator<2>::MakeShared(vel_state_vars[i-1])); + const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(accel_cost_term); + } + + } + + // Laterial Barrier State Constraints + if (i > 0) + { + // Generate a locked transform evaluator to store the current measurement for state constraints + // The reason we make it a variable and lock it is so we can use the built in steam evaluators which require evaluable inputs + measurement_vars.push_back(steam::se3::SE3StateVar::MakeShared(measurements[i])); + measurement_vars[i-1]->locked() = true; + + // Take the compose inverse of the locked measurement w.r.t the state transforms + const auto compose_inv = steam::se3::ComposeInverseEvaluator::MakeShared(measurement_vars[i-1], pose_state_vars[i]); + + // Use the ComposeLandmarkEvaluator to right multiply the 4th 
column of the identity matrix to create a 4x1 homogenous point vector with lat,lon,alt error components + const auto error_vec = steam::stereo::ComposeLandmarkEvaluator::MakeShared(compose_inv, I_4_eval); + + // Using a custom HomoPointErrorEvaluator, get lateral error (which is the same as the built-in stereo error evaluator but isolates the lateral error component of the 4x1 homo point vector error) + // We do this twice, once for each side of the corridor state constraint + + // DEBUG, for now using a static fixed corridor just to get things working, TODO swap this out with dynamic corridor when stable + Eigen::Matrix barrier_right; + barrier_right << 0, + -2.5, + 0, + 1; + Eigen::Matrix barrier_left; + barrier_left << 0.0, + 2.5, + 0, + 1; + + const auto lat_error_right = steam::stereo::HomoPointErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else + const auto lat_error_left = steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); + + // For each side of the barrier, compute a scalar inverse barrier term to penalize being close to the bound + const auto lat_barrier_right = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_right); + const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); + + // Generate least square cost terms and add them to the optimization problem + const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(lat_cost_term_right); + const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(lat_cost_term_left); + //CLOG(WARNING, "debug") << "Running the cbit one"; + } + + + + // Experimental Hard Lateral Constraints + /* + CLOG(DEBUG, "debug") << "Attempting to add a lateral cost term"; + const auto 
inv_state = steam::se3::InverseEvaluator::MakeShared(pose_state_vars[i]); + const auto pose_err = steam::vspace::MatrixMultEvaluator<4,4>::MakeShared(inv_state, measurements[i]); // I think this line is the problem, we cannot really convert the se3 evaluators to vspace evaluators + const auto left_mul = steam::vspace::MatrixMultEvaluator<1,4>::MakeShared(pose_err, I_2_tran); + const auto right_mul = steam::vspace::MatrixMultEvaluator<4,1>::MakeShared(pose_err, I_4); + const auto lat_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(right_mul, sharedLatNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(lat_cost_term); + CLOG(DEBUG, "debug") << "Successfully added Lat cost term"; + */ + } + + // Solve the optimization problem with GuassNewton solver + using SolverType = steam::GaussNewtonSolver; + // Initialize parameters (enable verbose mode) + SolverType::Params params; + params.verbose = true; // Makes the output display for debug when true + //params.relative_cost_change_threshold = 0.0000000001; + //params.max_iterations = 500; + //params.absolute_cost_change_threshold = 0.0000000001; + SolverType solver(opt_problem, params); + solver.optimize(); + + // DEBUG: Display the several of the prediction horizon results + /* + CLOG(DEBUG, "mpc_debug.cbit") << "Trying to Display the Optimization Results for the isolated MPC"; + CLOG(DEBUG, "mpc_debug.cbit") << "The First State is: " << pose_state_vars[0]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Second State is: " << pose_state_vars[1]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Third State is: " << pose_state_vars[2]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Forth State is: " << pose_state_vars[3]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Fifth State is: " << pose_state_vars[4]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Sixth State is: " << pose_state_vars[5]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The 
Seventh State is: " << pose_state_vars[6]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Eighth State is: " << pose_state_vars[7]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Ninth State is: " << pose_state_vars[8]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Tenth State is: " << pose_state_vars[9]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The First Velocity is: " << vel_state_vars[0]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Second Velocity is: " << vel_state_vars[1]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Third Velocity is: " << vel_state_vars[2]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Forth Velocity is: " << vel_state_vars[3]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Fifth Velocity is: " << vel_state_vars[4]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Sixth Velocity is: " << vel_state_vars[5]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Seventh Velocity is: " << vel_state_vars[6]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Eighth Velocity is: " << vel_state_vars[7]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Ninth Velocity is: " << vel_state_vars[8]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Tenth Velocity is: " << vel_state_vars[9]->value(); + + + CLOG(DEBUG, "mpc_debug.cbit") << "Linear Component to Return is: " << (vel_state_vars[0]->value())[0]; + CLOG(DEBUG, "mpc_debug.cbit") << "Angular Component to Return is: " << (vel_state_vars[0]->value())[1]; + */ + + // Store the velocity command to apply + Eigen::Matrix applied_vel = vel_state_vars[0]->value(); + + // First check if any of the values are nan, if so we return a zero velocity and flag the error + Eigen::Matrix nan_vel; + if (std::isnan(applied_vel(0)) || std::isnan(applied_vel(1))) + { + CLOG(ERROR, "mpc.cbit") << "NAN values detected, mpc optimization failed. 
Returning zero velocities"; + nan_vel(0) = 0.0; + nan_vel(1) = 0.0; + + // if we do detect nans, return the mpc_poses as all being the robots current pose (not moving across the horizon as we should be stopped) + std::vector mpc_poses; + for (int i = 0; i mpc_poses; + for (int i = 0; ivalue().inverse()); + } + + + // Return the resulting structure + return {applied_vel, mpc_poses}; + } +} + + + + + + + +// helper function for computing the optimization reference poses T_ref based on the current path solution +struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF) +{ + + + // Save a copy of the current path solution to work on + auto cbit_path = *cbit_path_ptr; + + // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) + + // PSEUDO CODE: + // 1. Find the closest point on the cbit path to the current state of the robot + // 2. Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) + // 3. After we have our starting closest point on the path, assign that point a p value of 0. Compute the p values for each subsequent point in the lookahead window + // 4. using the desired p values, and the known p values, interpolate a, x,y,z,yaw, value each measurement + // 5. 
Create the proper measurement transform for each measurement and get it ready for the using with the optimization problem + + // Limiting the size of the cbit path based on the sliding window and then assigning p values + double lookahead_dist = 0.0; + double p_dist = 0.0; + + double min_dist = INFINITY; + double new_dist; + double dx; + double dy; + double p_correction = 0.0; + + std::vector cbit_p; + cbit_p.reserve(cbit_path.size()); + cbit_p.push_back(0.0); + for (int i = 0; i < (cbit_path.size()-2); i++) // the last value of vector is size()-1, so second to last will be size-2 + { + // calculate the p value for the point + p_dist = sqrt((((cbit_path)[i].x - (cbit_path)[i+1].x) * ((cbit_path)[i].x - (cbit_path)[i+1].x)) + (((cbit_path)[i].y - (cbit_path)[i+1].y) * ((cbit_path)[i].y - (cbit_path)[i+1].y))); + lookahead_dist = lookahead_dist + p_dist; + cbit_p.push_back(lookahead_dist); + + // Keep track of the closest point to the robot state + dx = (cbit_path)[i].x - std::get<0>(robot_pose); + dy = (cbit_path)[i].y - std::get<1>(robot_pose); + new_dist = sqrt((dx * dx) + (dy * dy)); + if (new_dist < min_dist) + { + CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; + p_correction = lookahead_dist; + min_dist = new_dist; + } + + // Stop once we get about 12m ahead of the robot (magic number for now, but this is just a conservative estimate of any reasonable lookahead window and mpc horizon) + if (lookahead_dist > 12.0) + { + break; + } + } + //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; + + // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state + std::vector p_meas_vec; + std::vector measurements; + p_meas_vec.reserve(K); + for (int i = 0; i < K; i++) + { + + p_meas_vec.push_back((i * DT * VF) + p_correction); + } + //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + + // todo: Iterate through the p_measurements and interpolate euclidean 
measurements from the cbit_path and the corresponding cbit_p values + // Note this could be combined in the previous loop too + bool point_stabilization = false; + for (int i = 0; i < p_meas_vec.size(); i++) + { + // handle end of path case: + // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path + // This will cause the intepolation to return the final cbit_path pose for all measurements past this point + //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "debug") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + + if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) + { + p_meas_vec[i] = cbit_p[cbit_p.size()-1]; + point_stabilization = true; // Enable point stabilization configs + CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; + } + lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); + + // add to measurement vector + measurements.push_back(meas); + } + + return {measurements, point_stabilization}; + + + //End of Experimental Changes + +} + +// function takes in a the cbit path solution with a vector defining hte p axis of the path, and then a desired p_meas +// Then tries to output a euclidean pose interpolated for the desired p_meas. 
+lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) +{ + // Find the lower bound of the p values + for (int i = 0; i < cbit_p.size(); i++) + { + if (cbit_p[i] < p_val) + { + continue; + } + else + { + double p_lower = cbit_p[i-1]; + double p_upper = cbit_p[i]; + Pose pose_lower = cbit_path[i-1]; + Pose pose_upper = cbit_path[i]; + + double x_int = pose_lower.x + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .x - pose_lower.x); + double y_int = pose_lower.y + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .y - pose_lower.y); + double z_int = pose_lower.z + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .z - pose_lower.z); + + // For yaw we need to be abit careful about sign and angle wrap around + // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts + + double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); + + + // Build the transformation matrix + Eigen::Matrix4d T_ref; + T_ref << std::cos(yaw_int),-1*std::sin(yaw_int),0, x_int, + std::sin(yaw_int), std::cos(yaw_int),0, y_int, + 0, 0, 1, z_int, + 0, 0, 0, 1; + T_ref = T_ref.inverse().eval(); + + lgmath::se3::Transformation meas = lgmath::se3::Transformation(T_ref); + + CLOG(DEBUG, "mpc_debug.cbit") << "The measurement Euclidean state is - x: " << x_int << " y: " << y_int << " z: " << z_int << " yaw: " << yaw_int; + return meas; + } + } +} + + +// Simple function for checking that the current output velocity command is saturated between our mechanical velocity limits +Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim) +{ + double command_lin_x; + double command_ang_z; + Eigen::Matrix saturated_vel; + + // Moved nan check to the main mpc solver function + /* + // First check if any of the values are nan, if so we return a zero velocity and flag the error + if (std::isnan(applied_vel(0)) || std::isnan(applied_vel(1))) + { + CLOG(ERROR, 
"mpc.cbit") << "NAN values detected, mpc optimization failed. Returning zero velocities"; + saturated_vel(0) = 0.0; + saturated_vel(1) = 0.0; + return saturated_vel; + } + */ + + if (abs(applied_vel(0)) >= v_lim) + { + if (applied_vel(0) > 0.0) + { + command_lin_x = v_lim; + } + else if (applied_vel(0) < 0.0) + { + command_lin_x = -1.0* v_lim; + } + } + // Removed for bi-directional control purposes + //else if (applied_vel(0) <= 0.0) + //{ + // command_lin_x = 0.0; + //} + else + { + command_lin_x = applied_vel(0) ; + } + + if (abs(applied_vel(1)) >= w_lim) + { + if (applied_vel(1) > 0.0) + { + command_ang_z = w_lim; + } + else if (applied_vel(1) < 0.0) + { + command_ang_z = -1.0 * w_lim; + } + } + //else if (applied_vel(1) <= -1*w_lim) + //{ + // command_ang_z = -1*w_lim; + //} + else + { + command_ang_z = applied_vel(1) ; + } + + // Changes for Bi-directional path traversal, we no longer want to saturate commands less than 0.0 + + saturated_vel << command_lin_x, command_ang_z; + return saturated_vel; +} + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp index c4040d63e..0108738d1 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp @@ -109,6 +109,7 @@ class CBIT : public BasePathPlanner { Eigen::Matrix vel_error_cov = Eigen::Matrix::Zero(); Eigen::Matrix acc_error_cov = Eigen::Matrix::Zero(); Eigen::Matrix kin_error_cov = Eigen::Matrix::Zero(); + Eigen::Matrix lat_error_cov = Eigen::Matrix::Zero(); // Misc diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_loss_functions.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_loss_functions.hpp new file mode 100644 index 000000000..3ed0652fd --- /dev/null +++ 
b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_loss_functions.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include "steam/problem/loss_func/base_loss_func.hpp" + +namespace steam { + +/** \brief 'L2' loss function with different weights: TODO long term make a version of this class which is more generic and can dynamically set the weight */ +class L2WeightedLossFunc : public BaseLossFunc { + public: + /** \brief Convenience typedefs */ + using Ptr = std::shared_ptr; + using ConstPtr = std::shared_ptr; + + static Ptr MakeShared() { return std::make_shared(); } + + /** \brief Constructor */ + L2WeightedLossFunc() = default; + + /** \brief Cost function (basic evaluation of the loss function) */ + double cost(double whitened_error_norm) const override { + return 0.5 * whitened_error_norm * whitened_error_norm; + } + + /** + * \brief Weight for iteratively reweighted least-squares (influence function + * div. by error) + */ + double weight(double whitened_error_norm) const override { return 0.01; } +}; + +} // namespace steam \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp new file mode 100644 index 000000000..749840c9c --- /dev/null +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp @@ -0,0 +1,101 @@ +// Copyright 2021, Autonomous Space Robotics Lab (ASRL) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * \file custom_steam_evaluators.hpp + * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) + */ + +#pragma once + +#include "steam/evaluable/state_var.hpp" + +namespace steam { +namespace stereo { + +class HomoPointErrorEvaluatorLeft : public Evaluable> { + public: + using Ptr = std::shared_ptr; + using ConstPtr = std::shared_ptr; + + using InType = Eigen::Vector4d; + using OutType = Eigen::Matrix; + + static Ptr MakeShared(const Evaluable::ConstPtr& pt, + const InType& meas_pt); + HomoPointErrorEvaluatorLeft(const Evaluable::ConstPtr& pt, + const InType& meas_pt); + + bool active() const override; + void getRelatedVarKeys(KeySet& keys) const override; + + OutType value() const override; + Node::Ptr forward() const override; + void backward(const Eigen::MatrixXd& lhs, const Node::Ptr& node, + Jacobians& jacs) const override; + + private: + /** \brief Transform evaluable */ + const Evaluable::ConstPtr pt_; + /** \brief Landmark state variable */ + const InType meas_pt_; + // constants + Eigen::Matrix D_ = Eigen::Matrix::Zero(); +}; + +HomoPointErrorEvaluatorLeft::Ptr homo_point_error_left( + const Evaluable::ConstPtr& pt, + const HomoPointErrorEvaluatorLeft::InType& meas_pt); + + + + + +class HomoPointErrorEvaluatorRight : public Evaluable> { + public: + using Ptr = std::shared_ptr; + using ConstPtr = std::shared_ptr; + + using InType = Eigen::Vector4d; + using OutType = Eigen::Matrix; + + static Ptr MakeShared(const Evaluable::ConstPtr& pt, + const InType& meas_pt); + HomoPointErrorEvaluatorRight(const Evaluable::ConstPtr& pt, + const InType& meas_pt); + + bool active() const override; + void getRelatedVarKeys(KeySet& keys) const override; + + OutType value() const override; + Node::Ptr forward() const override; + void backward(const Eigen::MatrixXd& lhs, const Node::Ptr& node, + Jacobians& jacs) const override; + + private: + /** \brief 
Transform evaluable */ + const Evaluable::ConstPtr pt_; + /** \brief Landmark state variable */ + const InType meas_pt_; + // constants + Eigen::Matrix D_ = Eigen::Matrix::Zero(); +}; + +HomoPointErrorEvaluatorRight::Ptr homo_point_error_right( + const Evaluable::ConstPtr& pt, + const HomoPointErrorEvaluatorRight::InType& meas_pt); + + +} // namespace stereo +} // namespace steam diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp new file mode 100644 index 000000000..a4758b6c2 --- /dev/null +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp @@ -0,0 +1,49 @@ +// Copyright 2021, Autonomous Space Robotics Lab (ASRL) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +/** + * \file mpc.hpp + * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) + */ +#include "vtr_path_planning/cbit/cbit.hpp" +#include "steam.hpp" + +#pragma once + +// Define output structure for returning the optimization result +struct mpc_result +{ + Eigen::Matrix applied_vel; + std::vector mpc_poses; +}; + +struct meas_result +{ + std::vector measurements; + bool point_stabilization; +}; + +// Declaring helper functions + +// Primary optimization function: Takes in the input configurations and the extrapolated robot pose, outputs a vector for the velocity to apply and the predicted horizon +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization); + +// Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity +struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); + +// Helper function for post-processing and saturating the velocity command +Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim); + +// Helper function in Generate Reference Meas which interpolates a Transformation measurement gen the cbit_path and the desired measurements p value along the path +lgmath::se3::Transformation InterpolateMeas2(double p_meas, std::vector cbit_p, std::vector cbit_path); \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp new file mode 100644 index 000000000..aaed54465 --- /dev/null +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp @@ 
-0,0 +1,102 @@ +#pragma once + +#include + +#include "steam/evaluable/evaluable.hpp" + +namespace steam { +namespace vspace { + +template +class ScalarLogBarrierEvaluator : public Evaluable> { + public: + using Ptr = std::shared_ptr; + using ConstPtr = std::shared_ptr; + + using InType = Eigen::Matrix; + using OutType = Eigen::Matrix; + + static Ptr MakeShared(const typename Evaluable::ConstPtr& v); + ScalarLogBarrierEvaluator(const typename Evaluable::ConstPtr& v); + + bool active() const override; + using KeySet = typename Evaluable::KeySet; + void getRelatedVarKeys(KeySet& keys) const override; + + OutType value() const override; + typename Node::Ptr forward() const override; + void backward(const Eigen::MatrixXd& lhs, + const typename Node::Ptr& node, + Jacobians& jacs) const override; + + private: + const typename Evaluable::ConstPtr v_; +}; + +// clang-format off +template +typename ScalarLogBarrierEvaluator::Ptr slogbar( + const typename Evaluable::InType>::ConstPtr& v); +// clang-format on + +} // namespace vspace +} // namespace steam + +namespace steam { +namespace vspace { + +template +auto ScalarLogBarrierEvaluator::MakeShared( + const typename Evaluable::ConstPtr& v) -> Ptr { + return std::make_shared(v); +} + +template +ScalarLogBarrierEvaluator::ScalarLogBarrierEvaluator( + const typename Evaluable::ConstPtr& v) + : v_(v) {} + +template +bool ScalarLogBarrierEvaluator::active() const { + return v_->active(); +} + +template +void ScalarLogBarrierEvaluator::getRelatedVarKeys(KeySet& keys) const { + v_->getRelatedVarKeys(keys); +} + +template +auto ScalarLogBarrierEvaluator::value() const -> OutType { + return log(v_->value()); +} + +template +auto ScalarLogBarrierEvaluator::forward() const -> typename Node::Ptr { + const auto child = v_->forward(); + const auto value = log(child->value()); + const auto node = Node::MakeShared(value); + node->addChild(child); + return node; +} + +template +void ScalarLogBarrierEvaluator::backward(const Eigen::MatrixXd& 
lhs, + const typename Node::Ptr& node, + Jacobians& jacs) const { + const auto child = std::static_pointer_cast>(node->at(0)); + if (v_->active()) { + v_->backward(1.0 /(lhs), child, jacs); + } +} + +// clang-format off +template +typename ScalarLogBarrierEvaluator::Ptr slogbar( + const typename Evaluable::InType>::ConstPtr& v) { + return ScalarLogBarrierEvaluator::MakeShared(v); +} +// clang-format on + +} // namespace vspace +} // namespace steam \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/path_planning.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/path_planning.hpp index 41c17b9d5..95dce5e24 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/path_planning.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/path_planning.hpp @@ -17,7 +17,7 @@ * \author Yuchen Wu, Autonomous Space Robotics Lab (ASRL) */ #pragma once - -#include "vtr_path_planning/mpc/mpc_path_planner.hpp" +#include "vtr_path_planning/mpc/mpc_path_planner2.hpp" +//#include "vtr_path_planning/mpc/mpc_path_planner.hpp" #include "vtr_path_planning/teb/teb_path_planner.hpp" #include "vtr_path_planning/cbit/cbit.hpp" \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 441b4b5ac..19e94bfa7 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -18,9 +18,10 @@ */ #include "vtr_path_planning/cbit/cbit.hpp" -#include "vtr_path_planning/mpc/mpc_path_planner.hpp" +//#include "vtr_path_planning/mpc/mpc_path_planner.hpp" #include "vtr_path_planning/mpc/mpc_path_planner2.hpp" + #include #include @@ -93,6 +94,8 @@ auto CBIT::Config::fromROS(const rclcpp::Node::SharedPtr& node, const std::strin const auto kin_error_diag = node->declare_parameter>(prefix + ".mpc.kin_error_cov", std::vector()); config->kin_error_cov.diagonal() << kin_error_diag[0], kin_error_diag[1], kin_error_diag[2], 
kin_error_diag[3], kin_error_diag[4], kin_error_diag[5]; + const auto lat_error_diag = node->declare_parameter>(prefix + ".mpc.lat_error_cov", std::vector()); + config->lat_error_cov.diagonal() << lat_error_diag[0]; // MISC config->command_history_length = node->declare_parameter(prefix + ".mpc.command_history_length", config->command_history_length); @@ -332,6 +335,10 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { Eigen::Matrix kin_noise_vect; kin_noise_vect = config_->kin_error_cov; + // Lateral Covariance Weights (should be weighted quite heavily (smaller is higher because its covariance)) + Eigen::Matrix lat_noise_vect; + lat_noise_vect = config_->lat_error_cov; + // Extrapolating robot pose into the future by using the history of applied mpc velocity commands @@ -401,7 +408,7 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { // Calculate which T_ref measurements to used based on the current path solution CLOG(INFO, "mpc.cbit") << "Attempting to generate T_ref measurements"; - auto meas_result = GenerateReferenceMeas(cbit_path_ptr, robot_pose, K, DT, VF); + auto meas_result = GenerateReferenceMeas2(cbit_path_ptr, robot_pose, K, DT, VF); auto measurements = meas_result.measurements; bool point_stabilization = meas_result.point_stabilization; @@ -411,7 +418,7 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; - auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); + auto mpc_result = SolveMPC2(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here mpc_poses = mpc_result.mpc_poses; CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; @@ -428,7 +435,7 @@ auto 
CBIT::computeCommand(RobotState& robot_state) -> Command { // If required, saturate the output velocity commands based on the configuration limits CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; - Eigen::Matrix saturated_vel = SaturateVel(applied_vel, config_->max_lin_vel, config_->max_ang_vel); + Eigen::Matrix saturated_vel = SaturateVel2(applied_vel, config_->max_lin_vel, config_->max_ang_vel); // Store the result in memory so we can use previous state values to re-initialize and extrapolate the robot pose in subsequent iterations vel_history.erase(vel_history.begin()); diff --git a/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp b/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp new file mode 100644 index 000000000..e711eaaca --- /dev/null +++ b/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp @@ -0,0 +1,104 @@ +#include "vtr_path_planning/mpc/custom_steam_evaluators.hpp" + +namespace steam { +namespace stereo { + +auto HomoPointErrorEvaluatorRight::MakeShared(const Evaluable::ConstPtr& pt, + const InType& meas_pt) -> Ptr { + return std::make_shared(pt, meas_pt); +} + +HomoPointErrorEvaluatorRight::HomoPointErrorEvaluatorRight( + const Evaluable::ConstPtr& pt, const InType& meas_pt) + : pt_(pt), meas_pt_(meas_pt) { + //D_.block<3, 3>(0, 0) = Eigen::Matrix3d::Identity(); + D_(0,1) = 1; + +} + +bool HomoPointErrorEvaluatorRight::active() const { return pt_->active(); } + +void HomoPointErrorEvaluatorRight::getRelatedVarKeys(KeySet& keys) const { + pt_->getRelatedVarKeys(keys); +} + +auto HomoPointErrorEvaluatorRight::value() const -> OutType { + return D_ * (pt_->value() - meas_pt_); +} + +auto HomoPointErrorEvaluatorRight::forward() const -> Node::Ptr { + const auto child = pt_->forward(); + const auto value = D_ * (child->value() - meas_pt_); + const auto node = Node::MakeShared(value); + node->addChild(child); + return node; +} + +void HomoPointErrorEvaluatorRight::backward(const 
Eigen::MatrixXd& lhs, + const Node::Ptr& node, + Jacobians& jacs) const { + if (pt_->active()) { + const auto child = std::static_pointer_cast>(node->at(0)); + Eigen::MatrixXd new_lhs = lhs * D_; + pt_->backward(new_lhs, child, jacs); + } +} + +HomoPointErrorEvaluatorRight::Ptr homo_point_error_right( + const Evaluable::ConstPtr& pt, + const HomoPointErrorEvaluatorRight::InType& meas_pt) { + return HomoPointErrorEvaluatorRight::MakeShared(pt, meas_pt); +} + + + + +auto HomoPointErrorEvaluatorLeft::MakeShared(const Evaluable::ConstPtr& pt, + const InType& meas_pt) -> Ptr { + return std::make_shared(pt, meas_pt); +} + +HomoPointErrorEvaluatorLeft::HomoPointErrorEvaluatorLeft( + const Evaluable::ConstPtr& pt, const InType& meas_pt) + : pt_(pt), meas_pt_(meas_pt) { + //D_.block<3, 3>(0, 0) = Eigen::Matrix3d::Identity(); + D_(0,1) = 1; + +} + +bool HomoPointErrorEvaluatorLeft::active() const { return pt_->active(); } + +void HomoPointErrorEvaluatorLeft::getRelatedVarKeys(KeySet& keys) const { + pt_->getRelatedVarKeys(keys); +} + +auto HomoPointErrorEvaluatorLeft::value() const -> OutType { + return D_ * (meas_pt_ - pt_->value()); +} + +auto HomoPointErrorEvaluatorLeft::forward() const -> Node::Ptr { + const auto child = pt_->forward(); + const auto value = D_ * (meas_pt_ - child->value()); + const auto node = Node::MakeShared(value); + node->addChild(child); + return node; +} + +void HomoPointErrorEvaluatorLeft::backward(const Eigen::MatrixXd& lhs, + const Node::Ptr& node, + Jacobians& jacs) const { + if (pt_->active()) { + const auto child = std::static_pointer_cast>(node->at(0)); + Eigen::MatrixXd new_lhs = -lhs * D_; + pt_->backward(new_lhs, child, jacs); + } +} + +HomoPointErrorEvaluatorLeft::Ptr homo_point_error_left( + const Evaluable::ConstPtr& pt, + const HomoPointErrorEvaluatorLeft::InType& meas_pt) { + return HomoPointErrorEvaluatorLeft::MakeShared(pt, meas_pt); +} + +} // namespace stereo +} // namespace steam \ No newline at end of file diff --git 
a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp new file mode 100644 index 000000000..1b6ea39a6 --- /dev/null +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp @@ -0,0 +1,572 @@ +// Copyright 2021, Autonomous Space Robotics Lab (ASRL) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * \file mpc.cpp + * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) + */ +#include "vtr_path_planning/mpc/mpc_path_planner2.hpp" +#include "vtr_path_planning/mpc/custom_steam_evaluators.hpp" +#include "vtr_path_planning/mpc/custom_loss_functions.hpp" +#include "vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp" +//#include "vtr_lidar/cache.hpp" // For lidar version of the planner only + +// Updated version using corridor constrained MPC + +// This file is used to generate a tracking mpc output velocity command given a discretized path to follow and optimization parameters +// It is used in cbit.cpp in both the vtr_lidar package (obstacle avoidance) and vtr_path_planning packages (obstacle free) in the computeCommand function + + +// Main MPC problem solve function +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization) +{ + + // Conduct an MPC 
Iteration given current configurations + + // Velocity set-points (desired forward velocity and angular velocity), here we set a static forward target velocity, and try to minimize rotations (0rad/sec) + Eigen::Matrix v_ref; + v_ref << VF, + 0; + + + // Kinematic projection Matrix for Unicycle Model (note its -1's because our varpi lie algebra vector is of a weird frame) + // TODO, make the choice of projection matrix a configurable param for the desired vehicle model. + Eigen::Matrix P_tran; + P_tran << -1, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, -1; + + // Lateral constraint projection matrices (Experimental) + Eigen::Matrix I_4; // In steam, the homopoint vars automatically add the 4th row 1, so representing I_4 just needs the 3 zeros + I_4 << 0, + 0, + 0; + //Eigen::Matrix I_2_tran; + //I_2_tran << 0, 1, 0, 0; + + + // Setup shared loss functions and noise models for all cost terms + const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); + + const auto sharedPoseNoiseModel = + steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); + + const auto sharedVelNoiseModel = + steam::StaticNoiseModel<2>::MakeShared(vel_noise_vect); + + const auto sharedAccelNoiseModel = + steam::StaticNoiseModel<2>::MakeShared(accel_noise_vect); + + const auto sharedKinNoiseModel = + steam::StaticNoiseModel<6>::MakeShared(kin_noise_vect); + + // Experimental lat constraint (hardcoded for now) + Eigen::Matrix lat_noise_vect; + lat_noise_vect << 15.0; + const auto sharedLatNoiseModel = + steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); + + + + // Generate STEAM States for the velocity vector and SE3 state transforms + std::vector pose_states; + std::vector> vel_states; + + // Invert the extrapolated robot state and use this as the state initialization + lgmath::se3::Transformation T0_inv = T0.inverse(); + Eigen::Vector2d v0(0.0, 0.0); + + // Pushback the initial states (current robot state) + pose_states.push_back(T0_inv); // Change this to T0 when implementing on robot, 
T0_1 for debug + //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() + vel_states.push_back(v0); + + // Set the remaining states + for (int i=0; i pose_state_vars; + std::vector::Ptr> vel_state_vars; + std::vector measurement_vars; // This one is for storing measurements as locked evaluators for barrier constraints + // Create a locked state var for the 4th column of the identity matrix (used in state constraint) + steam::stereo::HomoPointStateVar::Ptr I_4_eval = steam::stereo::HomoPointStateVar::MakeShared(I_4); // For some reason I_4 needs to be 3x1, it cant handle 4x1's? + I_4_eval->locked() = true; + + for (int i = 0; i < K; i++) + { + pose_state_vars.push_back(steam::se3::SE3StateVar::MakeShared(pose_states[i])); + vel_state_vars.push_back(steam::vspace::VSpaceStateVar<2>::MakeShared(vel_states[i])); + } + + // Lock the first (current robot) state from being able to be modified during the optimization + pose_state_vars[0]->locked() = true; + + + // Setup the optimization problem + steam::OptimizationProblem opt_problem; + for (int i=0; i::MakeShared(pose_error_func, sharedPoseNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(pose_cost_term); + + // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) + //const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, sharedLossFunc); + //opt_problem.addCostTerm(vel_cost_term); + + + + // Kinematic constraints (softened but penalized heavily) + if (i < (K-1)) + { + const auto lhs = steam::se3::ComposeInverseEvaluator::MakeShared(pose_state_vars[i+1], pose_state_vars[i]); + const auto vel_proj = steam::vspace::MatrixMultEvaluator<6,2>::MakeShared(vel_state_vars[i], P_tran); // TODO, I guess this version of steam doesnt have this one, will need to do it myself + const auto scaled_vel_proj = 
steam::vspace::ScalarMultEvaluator<6>::MakeShared(vel_proj, DT); + const auto rhs = steam::se3::ExpMapEvaluator::MakeShared(scaled_vel_proj); + const auto kin_error_func = steam::se3::LogMapEvaluator::MakeShared(steam::se3::ComposeInverseEvaluator::MakeShared(lhs, rhs)); + const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(kin_cost_term); + + + // Experimental velocity set-point constraint (instead of non zero velocity penalty) + // Only add this cost term if we are not in point stabilization mode (end of path) + //if (point_stabilization == false) + //{ + // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, sharedLossFunc); + // opt_problem.addCostTerm(vel_cost_term); + //} + + + // Experimental acceleration Constraints + + if (i == 0) + { + // On the first iteration, we need to use an error with the previously applied control command state + const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i], previous_vel), sharedAccelNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(accel_cost_term); + } + else + { + // Subsequent iterations we make an error between consecutive velocities. 
We penalize large changes in velocity between time steps + const auto accel_diff = steam::vspace::AdditionEvaluator<2>::MakeShared(vel_state_vars[i], steam::vspace::NegationEvaluator<2>::MakeShared(vel_state_vars[i-1])); + const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(accel_cost_term); + } + + } + + // Laterial Barrier State Constraints + if (i > 0) + { + // Generate a locked transform evaluator to store the current measurement for state constraints + // The reason we make it a variable and lock it is so we can use the built in steam evaluators which require evaluable inputs + measurement_vars.push_back(steam::se3::SE3StateVar::MakeShared(measurements[i])); + measurement_vars[i-1]->locked() = true; + + // Take the compose inverse of the locked measurement w.r.t the state transforms + const auto compose_inv = steam::se3::ComposeInverseEvaluator::MakeShared(measurement_vars[i-1], pose_state_vars[i]); + + // Use the ComposeLandmarkEvaluator to right multiply the 4th column of the identity matrix to create a 4x1 homogenous point vector with lat,lon,alt error components + const auto error_vec = steam::stereo::ComposeLandmarkEvaluator::MakeShared(compose_inv, I_4_eval); + + // Using a custom HomoPointErrorEvaluator, get lateral error (which is the same as the built-in stereo error evaluator but isolates the lateral error component of the 4x1 homo point vector error) + // We do this twice, once for each side of the corridor state constraint + + // DEBUG, for now using a static fixed corridor just to get things working, TODO swap this out with dynamic corridor when stable + Eigen::Matrix barrier_right; + barrier_right << 0, + -2.5, + 0, + 1; + Eigen::Matrix barrier_left; + barrier_left << 0.0, + 2.5, + 0, + 1; + + const auto lat_error_right = steam::stereo::HomoPointErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something 
else + const auto lat_error_left = steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); + + // For each side of the barrier, compute a scalar inverse barrier term to penalize being close to the bound + const auto lat_barrier_right = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_right); + const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); + + // Generate least square cost terms and add them to the optimization problem + const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(lat_cost_term_right); + const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(lat_cost_term_left); + //CLOG(WARNING, "debug") << "Running the cbit one"; + } + + + + // Experimental Hard Lateral Constraints + /* + CLOG(DEBUG, "debug") << "Attempting to add a lateral cost term"; + const auto inv_state = steam::se3::InverseEvaluator::MakeShared(pose_state_vars[i]); + const auto pose_err = steam::vspace::MatrixMultEvaluator<4,4>::MakeShared(inv_state, measurements[i]); // I think this line is the problem, we cannot really convert the se3 evaluators to vspace evaluators + const auto left_mul = steam::vspace::MatrixMultEvaluator<1,4>::MakeShared(pose_err, I_2_tran); + const auto right_mul = steam::vspace::MatrixMultEvaluator<4,1>::MakeShared(pose_err, I_4); + const auto lat_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(right_mul, sharedLatNoiseModel, sharedLossFunc); + opt_problem.addCostTerm(lat_cost_term); + CLOG(DEBUG, "debug") << "Successfully added Lat cost term"; + */ + } + + // Solve the optimization problem with GuassNewton solver + using SolverType = steam::GaussNewtonSolver; + // Initialize parameters (enable verbose mode) + SolverType::Params params; + params.verbose = 
true; // Makes the output display for debug when true + //params.relative_cost_change_threshold = 0.0000000001; + //params.max_iterations = 500; + //params.absolute_cost_change_threshold = 0.0000000001; + SolverType solver(opt_problem, params); + solver.optimize(); + + // DEBUG: Display the several of the prediction horizon results + /* + CLOG(DEBUG, "mpc_debug.cbit") << "Trying to Display the Optimization Results for the isolated MPC"; + CLOG(DEBUG, "mpc_debug.cbit") << "The First State is: " << pose_state_vars[0]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Second State is: " << pose_state_vars[1]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Third State is: " << pose_state_vars[2]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Forth State is: " << pose_state_vars[3]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Fifth State is: " << pose_state_vars[4]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Sixth State is: " << pose_state_vars[5]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Seventh State is: " << pose_state_vars[6]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Eighth State is: " << pose_state_vars[7]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Ninth State is: " << pose_state_vars[8]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Tenth State is: " << pose_state_vars[9]->value().inverse(); + CLOG(DEBUG, "mpc_debug.cbit") << "The First Velocity is: " << vel_state_vars[0]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Second Velocity is: " << vel_state_vars[1]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Third Velocity is: " << vel_state_vars[2]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Forth Velocity is: " << vel_state_vars[3]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Fifth Velocity is: " << vel_state_vars[4]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Sixth Velocity is: " << vel_state_vars[5]->value(); + 
CLOG(DEBUG, "mpc_debug.cbit") << "The Seventh Velocity is: " << vel_state_vars[6]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Eighth Velocity is: " << vel_state_vars[7]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Ninth Velocity is: " << vel_state_vars[8]->value(); + CLOG(DEBUG, "mpc_debug.cbit") << "The Tenth Velocity is: " << vel_state_vars[9]->value(); + + + CLOG(DEBUG, "mpc_debug.cbit") << "Linear Component to Return is: " << (vel_state_vars[0]->value())[0]; + CLOG(DEBUG, "mpc_debug.cbit") << "Angular Component to Return is: " << (vel_state_vars[0]->value())[1]; + */ + + // Store the velocity command to apply + Eigen::Matrix applied_vel = vel_state_vars[0]->value(); + + // First check if any of the values are nan, if so we return a zero velocity and flag the error + Eigen::Matrix nan_vel; + if (std::isnan(applied_vel(0)) || std::isnan(applied_vel(1))) + { + CLOG(ERROR, "mpc.cbit") << "NAN values detected, mpc optimization failed. Returning zero velocities"; + nan_vel(0) = 0.0; + nan_vel(1) = 0.0; + + // if we do detect nans, return the mpc_poses as all being the robots current pose (not moving across the horizon as we should be stopped) + std::vector mpc_poses; + for (int i = 0; i mpc_poses; + for (int i = 0; ivalue().inverse()); + } + + + // Return the resulting structure + return {applied_vel, mpc_poses}; + } +} + + + + + + + +// helper function for computing the optimization reference poses T_ref based on the current path solution +struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF) +{ + + + // Save a copy of the current path solution to work on + auto cbit_path = *cbit_path_ptr; + + // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) + + // PSEUDO CODE: + // 1. Find the closest point on the cbit path to the current state of the robot + // 2. 
Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) + // 3. After we have our starting closest point on the path, assign that point a p value of 0. Compute the p values for each subsequent point in the lookahead window + // 4. using the desired p values, and the known p values, interpolate a, x,y,z,yaw, value each measurement + // 5. Create the proper measurement transform for each measurement and get it ready for the using with the optimization problem + + // Limiting the size of the cbit path based on the sliding window and then assigning p values + double lookahead_dist = 0.0; + double p_dist = 0.0; + + double min_dist = INFINITY; + double new_dist; + double dx; + double dy; + double p_correction = 0.0; + + std::vector cbit_p; + cbit_p.reserve(cbit_path.size()); + cbit_p.push_back(0.0); + for (int i = 0; i < (cbit_path.size()-2); i++) // the last value of vector is size()-1, so second to last will be size-2 + { + // calculate the p value for the point + p_dist = sqrt((((cbit_path)[i].x - (cbit_path)[i+1].x) * ((cbit_path)[i].x - (cbit_path)[i+1].x)) + (((cbit_path)[i].y - (cbit_path)[i+1].y) * ((cbit_path)[i].y - (cbit_path)[i+1].y))); + lookahead_dist = lookahead_dist + p_dist; + cbit_p.push_back(lookahead_dist); + + // Keep track of the closest point to the robot state + dx = (cbit_path)[i].x - std::get<0>(robot_pose); + dy = (cbit_path)[i].y - std::get<1>(robot_pose); + new_dist = sqrt((dx * dx) + (dy * dy)); + if (new_dist < min_dist) + { + CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; + p_correction = lookahead_dist; + min_dist = new_dist; + } + + // Stop once we get about 12m ahead of the robot (magic number for now, but this is just a conservative estimate of any reasonable lookahead window and mpc horizon) + if (lookahead_dist > 12.0) + { + break; + } + } + //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; + + // Determine the p_values we need for our 
measurement horizon, corrected for the p value of the closest point on the path to the current robot state + std::vector p_meas_vec; + std::vector measurements; + p_meas_vec.reserve(K); + for (int i = 0; i < K; i++) + { + + p_meas_vec.push_back((i * DT * VF) + p_correction); + } + //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + + // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values + // Note this could be combined in the previous loop too + bool point_stabilization = false; + for (int i = 0; i < p_meas_vec.size(); i++) + { + // handle end of path case: + // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path + // This will cause the intepolation to return the final cbit_path pose for all measurements past this point + //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "debug") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + + if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) + { + p_meas_vec[i] = cbit_p[cbit_p.size()-1]; + point_stabilization = true; // Enable point stabilization configs + CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; + } + lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); + + // add to measurement vector + measurements.push_back(meas); + } + + return {measurements, point_stabilization}; + + + //End of Experimental Changes + +} + +// function takes in a the cbit path solution with a vector defining hte p axis of the path, and then a desired p_meas +// Then tries to output a euclidean pose interpolated for the desired p_meas. 
+lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) +{ + // Find the lower bound of the p values + for (int i = 0; i < cbit_p.size(); i++) + { + if (cbit_p[i] < p_val) + { + continue; + } + else + { + double p_lower = cbit_p[i-1]; + double p_upper = cbit_p[i]; + Pose pose_lower = cbit_path[i-1]; + Pose pose_upper = cbit_path[i]; + + double x_int = pose_lower.x + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .x - pose_lower.x); + double y_int = pose_lower.y + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .y - pose_lower.y); + double z_int = pose_lower.z + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .z - pose_lower.z); + + // For yaw we need to be abit careful about sign and angle wrap around + // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts + + double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); + + + // Build the transformation matrix + Eigen::Matrix4d T_ref; + T_ref << std::cos(yaw_int),-1*std::sin(yaw_int),0, x_int, + std::sin(yaw_int), std::cos(yaw_int),0, y_int, + 0, 0, 1, z_int, + 0, 0, 0, 1; + T_ref = T_ref.inverse().eval(); + + lgmath::se3::Transformation meas = lgmath::se3::Transformation(T_ref); + + CLOG(DEBUG, "mpc_debug.cbit") << "The measurement Euclidean state is - x: " << x_int << " y: " << y_int << " z: " << z_int << " yaw: " << yaw_int; + return meas; + } + } +} + + +// Simple function for checking that the current output velocity command is saturated between our mechanical velocity limits +Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim) +{ + double command_lin_x; + double command_ang_z; + Eigen::Matrix saturated_vel; + + // Moved nan check to the main mpc solver function + /* + // First check if any of the values are nan, if so we return a zero velocity and flag the error + if (std::isnan(applied_vel(0)) || std::isnan(applied_vel(1))) + { + CLOG(ERROR, 
"mpc.cbit") << "NAN values detected, mpc optimization failed. Returning zero velocities"; + saturated_vel(0) = 0.0; + saturated_vel(1) = 0.0; + return saturated_vel; + } + */ + + if (abs(applied_vel(0)) >= v_lim) + { + if (applied_vel(0) > 0.0) + { + command_lin_x = v_lim; + } + else if (applied_vel(0) < 0.0) + { + command_lin_x = -1.0* v_lim; + } + } + // Removed for bi-directional control purposes + //else if (applied_vel(0) <= 0.0) + //{ + // command_lin_x = 0.0; + //} + else + { + command_lin_x = applied_vel(0) ; + } + + if (abs(applied_vel(1)) >= w_lim) + { + if (applied_vel(1) > 0.0) + { + command_ang_z = w_lim; + } + else if (applied_vel(1) < 0.0) + { + command_ang_z = -1.0 * w_lim; + } + } + //else if (applied_vel(1) <= -1*w_lim) + //{ + // command_ang_z = -1*w_lim; + //} + else + { + command_ang_z = applied_vel(1) ; + } + + // Changes for Bi-directional path traversal, we no longer want to saturate commands less than 0.0 + + saturated_vel << command_lin_x, command_ang_z; + return saturated_vel; +} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file From 295a71881dfe690f1994102c0d0c10223ff0570d Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Mon, 27 Mar 2023 09:47:20 -0400 Subject: [PATCH 14/25] Fixed stability issues with the obstacle free-version of the corridor mpc --- config/honeycomb_grizzly_default.yaml | 4 +- .../path_planning/mpc_path_planner2.hpp | 5 +- main/src/vtr_lidar/src/path_planning/cbit.cpp | 2 +- .../src/path_planning/mpc_path_planner2.cpp | 166 ++++++++++++++---- .../include/vtr_path_planning/cbit/cbit.hpp | 3 + .../vtr_path_planning/cbit/generate_pq.hpp | 1 + .../mpc/custom_loss_functions.hpp | 16 +- .../mpc/custom_steam_evaluators.hpp | 2 +- .../mpc/mpc_path_planner2.hpp | 6 +- .../mpc/scalar_log_barrier_evaluator.hpp | 10 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 17 +- .../src/cbit/generate_pq.cpp | 3 + .../src/mpc/mpc_path_planner2.cpp | 138 +++++++++++++-- 13 files changed, 315 
insertions(+), 58 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 85b2006af..1373ed1a5 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -473,11 +473,11 @@ vehicle_model: "unicycle" # Cost Function Weights - pose_error_cov: [0.1, 0.1, 0.1, 10000.0, 10000.0, 10000.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 + pose_error_cov: [0.75, 0.75, 0.75, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 vel_error_cov: [1.0, 10.0] # was [0.1 2.0] # No longer using this cost term acc_error_cov: [1.0, 1.0] kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] - lat_error_cov: [30.0] + lat_error_cov: [100.0] #backup #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 diff --git a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp index a4758b6c2..db0393553 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp @@ -37,11 +37,14 @@ struct meas_result // Declaring helper functions // Primary optimization function: Takes in the input configurations and the extrapolated robot pose, outputs a vector for the velocity to apply and the predicted horizon -struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization); +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector 
measurements, std::vector measurements_cbit, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization); // Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); +// Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity +struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid); + // Helper function for post-processing and saturating the velocity command Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim); diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index 7a1d55538..b36fe0e1f 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -343,7 +343,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; - auto mpc_result = SolveMPC2(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); + auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here mpc_poses = mpc_result.mpc_poses; CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp index 
ef567980c..26050ce4c 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -18,6 +18,7 @@ */ #include "vtr_path_planning/mpc/mpc_path_planner2.hpp" #include "vtr_path_planning/mpc/custom_steam_evaluators.hpp" +#include "vtr_path_planning/mpc/custom_loss_functions.hpp" #include "vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp" #include "vtr_lidar/cache.hpp" @@ -28,7 +29,7 @@ // Main MPC problem solve function -struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization) +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization) { // Conduct an MPC Iteration given current configurations @@ -59,7 +60,9 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Setup shared loss functions and noise models for all cost terms - const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); + const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); // The default L2 loss function weights all cost terms with a value of 1.0 + + const auto lateralLossFunc = steam::L2WeightedLossFunc::MakeShared(0.01); // The custom L2WeightedLossFunc allows you to dynamically set the weights of cost terms by providing the value as an argument const auto sharedPoseNoiseModel = steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); @@ -76,8 +79,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Experimental lat constraint (hardcoded for now) Eigen::Matrix lat_noise_vect; 
lat_noise_vect << 100.0; - const auto sharedLatNoiseModel = - steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); + const auto sharedLatNoiseModel = steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); @@ -94,11 +96,17 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() vel_states.push_back(v0); + CLOG(ERROR, "mpc.cbit") << "Verifying that the initial robot state is about the same as the initial reference measurement"; + CLOG(ERROR, "mpc.cbit") << "T0_inv: " << T0_inv; + CLOG(ERROR, "mpc.cbit") << "1st meas: " << measurements_cbit[0]; + + // Set the remaining states for (int i=0; i previous_vel, lgmath::se for (int i = 0; i < K; i++) { - pose_state_vars.push_back(steam::se3::SE3StateVar::MakeShared(pose_states[i])); + pose_state_vars.push_back(steam::se3::SE3StateVar::MakeShared(pose_states[i])); vel_state_vars.push_back(steam::vspace::VSpaceStateVar<2>::MakeShared(vel_states[i])); } @@ -224,9 +232,9 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); // Generate least square cost terms and add them to the optimization problem - const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, sharedLossFunc); + const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, lateralLossFunc); opt_problem.addCostTerm(lat_cost_term_right); - const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, sharedLossFunc); + const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, lateralLossFunc); opt_problem.addCostTerm(lat_cost_term_left); //CLOG(WARNING, "debug") << "Running the 
cbit one"; } @@ -251,9 +259,9 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Initialize parameters (enable verbose mode) SolverType::Params params; params.verbose = true; // Makes the output display for debug when true - //params.relative_cost_change_threshold = 0.0000000001; + params.relative_cost_change_threshold = 0.000001; //params.max_iterations = 500; - //params.absolute_cost_change_threshold = 0.0000000001; + params.absolute_cost_change_threshold = 0.000001; SolverType solver(opt_problem, params); solver.optimize(); @@ -426,6 +434,123 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi } +// For generating VT&R teach path measurements +struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid) +{ + // note this was some rapid prototype code written quite quickly for a meeting, need to refactor this longer term to make it faster for longer paths + CLOG(WARNING, "corridor_mpc_debug.cbit") << "Starting to Pre-process global path"; + CLOG(WARNING, "corridor_mpc_debug.cbit") << "The current sid is: " << current_sid; + + // load the teach path + std::vector teach_path = global_path_ptr->disc_path; + //auto teach_p = global_path_ptr->p; + std::vector cbit_path; + // Get rid of any of the path before the current sid + + // handle case at start of path + if (current_sid - 1 < 0) + { + current_sid = 1; + } + // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot + for (int i = (current_sid-1); i < teach_path.size(); i++) + { + cbit_path.push_back(teach_path[i]); + } + + // Save a copy of the current path solution to work on + //auto cbit_path = *cbit_path_ptr; + + // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) + + // PSEUDO CODE: + // 1. 
Find the closest point on the cbit path to the current state of the robot + // 2. Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) + // 3. After we have our starting closest point on the path, assign that point a p value of 0. Compute the p values for each subsequent point in the lookahead window + // 4. using the desired p values, and the known p values, interpolate a, x,y,z,yaw, value each measurement + // 5. Create the proper measurement transform for each measurement and get it ready for the using with the optimization problem + + // Limiting the size of the cbit path based on the sliding window and then assigning p values + double lookahead_dist = 0.0; + double p_dist = 0.0; + + double min_dist = INFINITY; + double new_dist; + double dx; + double dy; + double p_correction = 0.0; + + std::vector cbit_p; + cbit_p.reserve(cbit_path.size()); + cbit_p.push_back(0.0); + for (int i = 0; i < (cbit_path.size()-2); i++) // the last value of vector is size()-1, so second to last will be size-2 + { + // calculate the p value for the point + p_dist = sqrt((((cbit_path)[i].x - (cbit_path)[i+1].x) * ((cbit_path)[i].x - (cbit_path)[i+1].x)) + (((cbit_path)[i].y - (cbit_path)[i+1].y) * ((cbit_path)[i].y - (cbit_path)[i+1].y))); + lookahead_dist = lookahead_dist + p_dist; + cbit_p.push_back(lookahead_dist); + + // Keep track of the closest point to the robot state + dx = (cbit_path)[i].x - std::get<0>(robot_pose); + dy = (cbit_path)[i].y - std::get<1>(robot_pose); + new_dist = sqrt((dx * dx) + (dy * dy)); + if (new_dist < min_dist) + { + CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; + p_correction = lookahead_dist; + min_dist = new_dist; + } + + // Stop once we get about 12m ahead of the robot (magic number for now, but this is just a conservative estimate of any reasonable lookahead window and mpc horizon) + if (lookahead_dist > 12.0) + { + break; + } + } + //CLOG(DEBUG, "debug") 
<< "cbit_p is: " << cbit_p; + + // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state + std::vector p_meas_vec; + std::vector measurements; + p_meas_vec.reserve(K); + for (int i = 0; i < K; i++) + { + + p_meas_vec.push_back((i * DT * VF) + p_correction); + } + //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + + // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values + // Note this could be combined in the previous loop too + bool point_stabilization = false; + for (int i = 0; i < p_meas_vec.size(); i++) + { + // handle end of path case: + // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path + // This will cause the intepolation to return the final cbit_path pose for all measurements past this point + //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "debug") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + + if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) + { + p_meas_vec[i] = cbit_p[cbit_p.size()-1]; + point_stabilization = true; // Enable point stabilization configs + CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; + } + lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); + CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; + + + // add to measurement vector + measurements.push_back(meas); + } + + return {measurements, point_stabilization}; +} + + + // function takes in a the cbit path solution with a vector defining hte p axis of the path, and then a desired p_meas // Then tries to output a euclidean pose interpolated for the desired p_meas. 
lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) @@ -538,24 +663,3 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel } - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp index 0108738d1..c7018b864 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp @@ -173,6 +173,9 @@ class CBIT : public BasePathPlanner { // Pointers to the corridor std::shared_ptr corridor_ptr; + // Pointer to the global path + std::shared_ptr global_path_ptr; + tactic::Timestamp prev_stamp; // Store the previously applied velocity and a sliding window history of MPC results diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp index e6a8cc1b9..a2d109bd4 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp @@ -37,6 +37,7 @@ class CBITPath { std::vector disc_path; // Stores the se3 discrete path as a vector of Euclidean vectors (The original disc path) // TODO, change to se(3) class std::vector p; //associated p values for each pose in disc_path std::vector path; // Stores the se3 splined discrete path as a vector of Euclidean vectors; //TODO, change to se(3) class + std::vector sid; // store the sid value of the transform from the teach path CBITPath(CBITConfig config, std::vector initial_path); // constructor; need to feed this the path CBITPath() = default; diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_loss_functions.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_loss_functions.hpp index 
3ed0652fd..a3df1c522 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_loss_functions.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_loss_functions.hpp @@ -6,15 +6,24 @@ namespace steam { /** \brief 'L2' loss function with different weights: TODO long term make a version of this class which is more generic and can dynamically set the weight */ class L2WeightedLossFunc : public BaseLossFunc { + private: + double weight_val = 1.0; + public: /** \brief Convenience typedefs */ using Ptr = std::shared_ptr; using ConstPtr = std::shared_ptr; - static Ptr MakeShared() { return std::make_shared(); } + static Ptr MakeShared(double weight_input) { return std::make_shared(weight_input); } /** \brief Constructor */ - L2WeightedLossFunc() = default; + //L2WeightedLossFunc() = default; + + + L2WeightedLossFunc(double weight_input) + { + weight_val = weight_input; + } /** \brief Cost function (basic evaluation of the loss function) */ double cost(double whitened_error_norm) const override { @@ -25,7 +34,8 @@ class L2WeightedLossFunc : public BaseLossFunc { * \brief Weight for iteratively reweighted least-squares (influence function * div. 
by error) */ - double weight(double whitened_error_norm) const override { return 0.01; } + double weight(double whitened_error_norm) const override { return weight_val; } + }; } // namespace steam \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp index 749840c9c..135e80d99 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp @@ -55,7 +55,7 @@ class HomoPointErrorEvaluatorLeft : public Evaluable }; HomoPointErrorEvaluatorLeft::Ptr homo_point_error_left( - const Evaluable::ConstPtr& pt, + const Evaluable::ConstPtr& pt, const HomoPointErrorEvaluatorLeft::InType& meas_pt); diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp index a4758b6c2..849b0f31d 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp @@ -37,11 +37,15 @@ struct meas_result // Declaring helper functions // Primary optimization function: Takes in the input configurations and the extrapolated robot pose, outputs a vector for the velocity to apply and the predicted horizon -struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization); +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, 
Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization); // Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); +// Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity +struct meas_result GenerateReferenceMeas3(std::shared_ptr cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid); + + // Helper function for post-processing and saturating the velocity command Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim); diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp index aaed54465..9b430c48f 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp @@ -68,13 +68,17 @@ void ScalarLogBarrierEvaluator::getRelatedVarKeys(KeySet& keys) const { template auto ScalarLogBarrierEvaluator::value() const -> OutType { - return log(v_->value()); + auto v_intermed = v_->value(); + v_intermed[0] = (v_->value()[0]); // TODO. need to validate this, not 100% sure this is the best/correct way to do this log + return v_intermed; } template auto ScalarLogBarrierEvaluator::forward() const -> typename Node::Ptr { const auto child = v_->forward(); - const auto value = log(child->value()); + auto value = child->value(); + value[0] = log((child->value())[0]); // TODO. 
need to validate this, not 100% sure this is the best/correct way to do this log + //const auto value = log((child->value()[0])); const auto node = Node::MakeShared(value); node->addChild(child); return node; @@ -86,7 +90,7 @@ void ScalarLogBarrierEvaluator::backward(const Eigen::MatrixXd& lhs, Jacobians& jacs) const { const auto child = std::static_pointer_cast>(node->at(0)); if (v_->active()) { - v_->backward(1.0 /(lhs), child, jacs); + v_->backward(lhs.inverse(), child, jacs); } } diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 19e94bfa7..79f8fe94a 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -257,11 +257,11 @@ void CBIT::initializeRoute(RobotState& robot_state) { CLOG(INFO, "path_planning.cbit") << "The path repeat direction is:" << path_direction; - CLOG(INFO, "path_planning.cbit") << "Trying to create global path"; + CLOG(ERROR, "path_planning.cbit") << "Trying to create global path"; // Create the path class object (Path preprocessing) CBITPath global_path(cbit_config, euclid_path_vec); // Make a pointer to this path - std::shared_ptr global_path_ptr = std::make_shared(global_path); + global_path_ptr = std::make_shared(global_path); CLOG(INFO, "path_planning.cbit") << "Teach Path has been pre-processed. 
Attempting to initialize the dynamic corridor"; @@ -413,12 +413,20 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { bool point_stabilization = meas_result.point_stabilization; + // Experimental, corridor MPC reference measurement generation: + CLOG(WARNING, "mpc.cbit") << "Attempting to generate T_ref measurements"; + auto meas_result3 = GenerateReferenceMeas3(global_path_ptr, robot_pose, K, DT, VF, curr_sid); + auto measurements3 = meas_result3.measurements; + bool point_stabilization3 = meas_result3.point_stabilization; + // END of experimental code + + // Create and solve the STEAM optimization problem std::vector mpc_poses; try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; - auto mpc_result = SolveMPC2(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); + auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here mpc_poses = mpc_result.mpc_poses; CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; @@ -430,8 +438,7 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { applied_vel(1) = 0.0; } - CLOG(INFO, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); - + CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); // If required, saturate the output velocity commands based on the configuration limits CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; diff --git a/main/src/vtr_path_planning/src/cbit/generate_pq.cpp b/main/src/vtr_path_planning/src/cbit/generate_pq.cpp index bc866773e..8402c49da 100644 --- a/main/src/vtr_path_planning/src/cbit/generate_pq.cpp +++ 
b/main/src/vtr_path_planning/src/cbit/generate_pq.cpp @@ -38,9 +38,12 @@ CBITPath::CBITPath(CBITConfig config, std::vector initial_path) int vect_size = disc_path.size(); p.reserve(vect_size); p.push_back(0); + sid.reserve(vect_size); + sid.push_back(0); for (int i=1; i previous_vel, lgmath::se3::Transformation T0, std::vector measurements, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization) +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization) { // Conduct an MPC Iteration given current configurations @@ -62,6 +62,8 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Setup shared loss functions and noise models for all cost terms const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); + const auto lateralLossFunc = steam::L2WeightedLossFunc::MakeShared(0.01); + const auto sharedPoseNoiseModel = steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); @@ -76,9 +78,8 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Experimental lat constraint (hardcoded for now) Eigen::Matrix lat_noise_vect; - lat_noise_vect << 15.0; - const auto sharedLatNoiseModel = - steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); + lat_noise_vect << 100.0; + const auto sharedLatNoiseModel = steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); @@ -225,10 +226,10 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); // Generate least square cost terms and add them to the optimization problem - const auto lat_cost_term_right = 
steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, sharedLossFunc); - opt_problem.addCostTerm(lat_cost_term_right); - const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, sharedLossFunc); - opt_problem.addCostTerm(lat_cost_term_left); + const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, lateralLossFunc); + //opt_problem.addCostTerm(lat_cost_term_right); + const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, lateralLossFunc); + //opt_problem.addCostTerm(lat_cost_term_left); //CLOG(WARNING, "debug") << "Running the cbit one"; } @@ -252,9 +253,9 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Initialize parameters (enable verbose mode) SolverType::Params params; params.verbose = true; // Makes the output display for debug when true - //params.relative_cost_change_threshold = 0.0000000001; + params.relative_cost_change_threshold = 0.000001; //params.max_iterations = 500; - //params.absolute_cost_change_threshold = 0.0000000001; + params.absolute_cost_change_threshold = 0.000001; SolverType solver(opt_problem, params); solver.optimize(); @@ -427,6 +428,123 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi } +// For generating VT&R teach path measurements +struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid) +{ + // note this was some rapid prototype code written quite quickly for a meeting, need to refactor this longer term to make it faster for longer paths + CLOG(WARNING, "corridor_mpc_debug.cbit") << "Starting to Pre-process global path"; + CLOG(WARNING, "corridor_mpc_debug.cbit") << "The current sid is: " << current_sid; + + // load the teach path + std::vector teach_path = global_path_ptr->disc_path; + //auto 
teach_p = global_path_ptr->p; + std::vector cbit_path; + // Get rid of any of the path before the current sid + + // handle case at start of path + if (current_sid - 1 < 0) + { + current_sid = 1; + } + // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot + for (int i = (current_sid-1); i < teach_path.size(); i++) + { + cbit_path.push_back(teach_path[i]); + } + + // Save a copy of the current path solution to work on + //auto cbit_path = *cbit_path_ptr; + + // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) + + // PSEUDO CODE: + // 1. Find the closest point on the cbit path to the current state of the robot + // 2. Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) + // 3. After we have our starting closest point on the path, assign that point a p value of 0. Compute the p values for each subsequent point in the lookahead window + // 4. using the desired p values, and the known p values, interpolate a, x,y,z,yaw, value each measurement + // 5. 
Create the proper measurement transform for each measurement and get it ready for the using with the optimization problem + + // Limiting the size of the cbit path based on the sliding window and then assigning p values + double lookahead_dist = 0.0; + double p_dist = 0.0; + + double min_dist = INFINITY; + double new_dist; + double dx; + double dy; + double p_correction = 0.0; + + std::vector cbit_p; + cbit_p.reserve(cbit_path.size()); + cbit_p.push_back(0.0); + for (int i = 0; i < (cbit_path.size()-2); i++) // the last value of vector is size()-1, so second to last will be size-2 + { + // calculate the p value for the point + p_dist = sqrt((((cbit_path)[i].x - (cbit_path)[i+1].x) * ((cbit_path)[i].x - (cbit_path)[i+1].x)) + (((cbit_path)[i].y - (cbit_path)[i+1].y) * ((cbit_path)[i].y - (cbit_path)[i+1].y))); + lookahead_dist = lookahead_dist + p_dist; + cbit_p.push_back(lookahead_dist); + + // Keep track of the closest point to the robot state + dx = (cbit_path)[i].x - std::get<0>(robot_pose); + dy = (cbit_path)[i].y - std::get<1>(robot_pose); + new_dist = sqrt((dx * dx) + (dy * dy)); + if (new_dist < min_dist) + { + CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; + p_correction = lookahead_dist; + min_dist = new_dist; + } + + // Stop once we get about 12m ahead of the robot (magic number for now, but this is just a conservative estimate of any reasonable lookahead window and mpc horizon) + if (lookahead_dist > 12.0) + { + break; + } + } + //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; + + // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state + std::vector p_meas_vec; + std::vector measurements; + p_meas_vec.reserve(K); + for (int i = 0; i < K; i++) + { + + p_meas_vec.push_back((i * DT * VF) + p_correction); + } + //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + + // todo: Iterate through the p_measurements and interpolate euclidean 
measurements from the cbit_path and the corresponding cbit_p values + // Note this could be combined in the previous loop too + bool point_stabilization = false; + for (int i = 0; i < p_meas_vec.size(); i++) + { + // handle end of path case: + // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path + // This will cause the intepolation to return the final cbit_path pose for all measurements past this point + //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "debug") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + + if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) + { + p_meas_vec[i] = cbit_p[cbit_p.size()-1]; + point_stabilization = true; // Enable point stabilization configs + CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; + } + lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); + CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; + + + // add to measurement vector + measurements.push_back(meas); + } + + return {measurements, point_stabilization}; +} + + + // function takes in a the cbit path solution with a vector defining hte p axis of the path, and then a desired p_meas // Then tries to output a euclidean pose interpolated for the desired p_meas. 
lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) From 18dd4f0f7875102bf81c02e7ed71d65054063294 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Tue, 4 Apr 2023 16:00:49 -0400 Subject: [PATCH 15/25] Controller updates and tuning as well as optimized planner collision checking functions for faster batches per second --- config/honeycomb_grizzly_default.yaml | 48 ++- launch/online_honeycomb_grizzly.launch.yaml | 2 +- .../path_planning/mpc_path_planner2.hpp | 13 +- main/src/vtr_lidar/src/path_planning/cbit.cpp | 49 ++- .../src/path_planning/mpc_path_planner2.cpp | 195 ++++++++---- .../include/vtr_path_planning/cbit/cbit.hpp | 6 + .../cbit/cbit_path_planner.hpp | 2 +- .../mpc/mpc_path_planner2.hpp | 12 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 34 ++- .../src/cbit/cbit_path_planner.cpp | 285 ++++++++++++------ .../src/mpc/mpc_path_planner2.cpp | 154 +++++----- rviz/honeycomb.rviz | 28 +- 12 files changed, 545 insertions(+), 283 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 1373ed1a5..99cddc551 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -19,7 +19,7 @@ # path planner #"path_planning", #"path_planning.teb", - "path_planning.cbit", + #"path_planning.cbit", #"path_planning.cbit_planner", #"mpc.cbit", #"mpc_debug.cbit", @@ -306,7 +306,7 @@ support_variance: 0.05 support_threshold: 2.5 - influence_distance: 0.50 # was 0.5 Jordy # Note that the total distancr=e where grid cells have values > 0 is min dist + influence dist, not influence dist! + influence_distance: 0.50 # was 0.5 Jordy # Note that the total distance where grid cells have values > 0 is min dist + influence dist, not influence dist! 
minimum_distance: 0.9 # was 0.3 Jordy # cost map @@ -409,7 +409,7 @@ ############ path planning configuration ############ path_planning: type: cbit # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version - control_period: 50 # ms + control_period: 100 # ms teb: # vtr specific visualize: true @@ -465,21 +465,43 @@ mpc: # Controller Params - horizon_steps: 20 - horizon_step_size: 0.3 + horizon_steps: 15 + horizon_step_size: 0.15 forward_vel: 1.00 - max_lin_vel: 1.55 - max_ang_vel: 0.75 + max_lin_vel: 1.50 + max_ang_vel: 1.00 vehicle_model: "unicycle" + # Cost Function Covariance Matrices + #pose_error_cov: [0.75, 0.75, 0.75, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 + #vel_error_cov: [1.0, 10.0] # was [0.1 2.0] # No longer using this cost term + #acc_error_cov: [1.0, 1.0] + #kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] + #lat_error_cov: [100.0] + # Cost Function Weights - pose_error_cov: [0.75, 0.75, 0.75, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 - vel_error_cov: [1.0, 10.0] # was [0.1 2.0] # No longer using this cost term - acc_error_cov: [1.0, 1.0] - kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] - lat_error_cov: [100.0] + #pose_error_weight: 1.0 + #acc_error_weight: 1.0 + #kin_error_weight: 1.0 + #lat_error_weight: 0.01 + + # Cost Function Covariance Matrices + #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 + pose_error_cov: [50.0, 50.0, 10000.0, 10000.0, 10000.0, 10000.0] + vel_error_cov: [500.0, 50.0] # was [0.1 2.0] # No longer using this cost term + #acc_error_cov: [2.0, 2.0] + acc_error_cov: [50.0, 50.0] + kin_error_cov: [50.0, 50.0, 50.0, 50.0, 50.0, 50.0] + #kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] + 
lat_error_cov: [50.0] - #backup + # Cost Function Weights + pose_error_weight: 2.0 + vel_error_weight: 1.0 + acc_error_weight: 1.0 + kin_error_weight: 10000.0 + lat_error_weight: 0.02 + #backup for tracking mpc #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 #vel_error_cov: [0.1, 1.0] # was [0.1 2.0] #acc_error_cov: [0.1, 0.75] diff --git a/launch/online_honeycomb_grizzly.launch.yaml b/launch/online_honeycomb_grizzly.launch.yaml index 7b4f2380a..95088af6f 100644 --- a/launch/online_honeycomb_grizzly.launch.yaml +++ b/launch/online_honeycomb_grizzly.launch.yaml @@ -22,7 +22,7 @@ windows: - > ros2 launch vtr_navigation vtr.launch.py base_params:=honeycomb_grizzly_default.yaml - data_dir:=${VTRDATA}/online-test-lidar/$(date '+%F')/$(date '+%F') + data_dir:=${VTRDATA}/online-test-lidar/2023-03-28/2023-03-28 start_new_graph:=false use_sim_time:=false diff --git a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp index db0393553..ebf193ed2 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp @@ -34,16 +34,25 @@ struct meas_result bool point_stabilization; }; +struct meas_result2 +{ + std::vector measurements; + bool point_stabilization; + std::vector barrier_q_left; + std::vector barrier_q_right; +}; + + // Declaring helper functions // Primary optimization function: Takes in the input configurations and the extrapolated robot pose, outputs a vector for the velocity to apply and the predicted horizon -struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, 
Eigen::Matrix kin_noise_vect, bool point_stabilization); +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, std::vector barrier_q_left, std::vector barrier_q_right, int K, double DT, double VF, Eigen::Matrix lat_noise_vect, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization, double pose_error_weight, double vel_error_weight, double acc_error_weight, double kin_error_weight, double lat_error_weight); // Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); // Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity -struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid); +struct meas_result2 GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid); // Helper function for post-processing and saturating the velocity command Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim); diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index b36fe0e1f..5cba410d7 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -77,7 +77,7 @@ auto LidarCBIT::Config::fromROS(const rclcpp::Node::SharedPtr& node, const std:: config->max_lin_vel = node->declare_parameter(prefix + ".mpc.max_lin_vel", config->max_lin_vel); config->max_ang_vel = node->declare_parameter(prefix + 
".mpc.max_ang_vel", config->max_ang_vel); - // COST FUNCTION WEIGHTS + // COST FUNCTION Covariances const auto pose_error_diag = node->declare_parameter>(prefix + ".mpc.pose_error_cov", std::vector()); config->pose_error_cov.diagonal() << pose_error_diag[0], pose_error_diag[1], pose_error_diag[2], pose_error_diag[3], pose_error_diag[4], pose_error_diag[5]; @@ -93,6 +93,13 @@ auto LidarCBIT::Config::fromROS(const rclcpp::Node::SharedPtr& node, const std:: const auto lat_error_diag = node->declare_parameter>(prefix + ".mpc.lat_error_cov", std::vector()); config->lat_error_cov.diagonal() << lat_error_diag[0]; + // COST FUNCTION WEIGHTS + config->pose_error_weight = node->declare_parameter(prefix + ".mpc.pose_error_weight", config->pose_error_weight); + config->vel_error_weight = node->declare_parameter(prefix + ".mpc.vel_error_weight", config->vel_error_weight); + config->acc_error_weight = node->declare_parameter(prefix + ".mpc.acc_error_weight", config->acc_error_weight); + config->kin_error_weight = node->declare_parameter(prefix + ".mpc.kin_error_weight", config->kin_error_weight); + config->lat_error_weight = node->declare_parameter(prefix + ".mpc.lat_error_weight", config->lat_error_weight); + // MISC config->command_history_length = node->declare_parameter(prefix + ".mpc.command_history_length", config->command_history_length); @@ -138,7 +145,8 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // retrieve the transorm info from the localization chain const auto chain_info = getChainInfo(robot_state); auto [stamp, w_p_r_in_r, T_p_r, T_w_p, T_w_v_odo, T_r_v_odo, curr_sid] = chain_info; - + CLOG(INFO, "path_planning.cbit") << "The T_r_v_odo is: " << T_r_v_odo; + CLOG(INFO, "path_planning.cbit") << "The T_p_r is: " << T_p_r; //START OF OBSTACLE PERCEPTION UPDATES @@ -184,6 +192,11 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { CLOG(DEBUG, "obstacle_detection.cbit") << "The costmap to world transform is: " << 
T_start_vertex.inverse(); costmap_ptr->grid_resolution = change_detection_costmap->dl(); + + // Note I think all of the code below here was legacy when I was temporally filtering in cbit, now the most recent obs map should contain the history of the costmaps all embedded + // So in cbit_planner we only need to query the value of costmap_ptr->T_c_w and costmap_ptr->obs_map + + // Experimental: Storing sequences of costmaps for temporal filtering purposes // For the first x iterations, fill the obstacle vector if (costmap_ptr->obs_map_vect.size() < config_->costmap_history) @@ -264,6 +277,18 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { Eigen::Matrix kin_noise_vect; kin_noise_vect = config_->kin_error_cov; + // Lateral Constraint Covariance Weights + Eigen::Matrix lat_noise_vect; + lat_noise_vect = config_->lat_error_cov; + + // Cost term weights + double pose_error_weight = config_->pose_error_weight; + double vel_error_weight = config_->vel_error_weight; + double acc_error_weight = config_->acc_error_weight; + double kin_error_weight = config_->kin_error_weight; + double lat_error_weight = config_->lat_error_weight; + + // Extrapolating robot pose into the future by using the history of applied mpc velocity commands @@ -313,10 +338,10 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp); // Uncomment for using the mpc extrapolated robot pose for control - lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp2); + //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp2); // no extrapolation (comment this out if we are not using extrapolation) - //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); + lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); // TODO: Set whether to use mpc extrapolation as a config param (though there is almost 
never a good reason not to use it) @@ -337,13 +362,23 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { auto measurements = meas_result.measurements; bool point_stabilization = meas_result.point_stabilization; + // Experimental, corridor MPC reference measurement generation: + auto meas_result3 = GenerateReferenceMeas3(global_path_ptr, corridor_ptr, robot_pose, K, DT, VF, curr_sid); + auto measurements3 = meas_result3.measurements; + bool point_stabilization3 = meas_result3.point_stabilization; + std::vector barrier_q_left = meas_result3.barrier_q_left; + std::vector barrier_q_right = meas_result3.barrier_q_right; + // END of experimental code + // Create and solve the STEAM optimization problem std::vector mpc_poses; try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; - auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); + auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + //auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + //auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); // Tracking controller version applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here mpc_poses = mpc_result.mpc_poses; CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; @@ -355,13 +390,13 
@@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { applied_vel(1) = 0.0; } - CLOG(INFO, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); + CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); // If required, saturate the output velocity commands based on the configuration limits CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; Eigen::Matrix saturated_vel = SaturateVel2(applied_vel, config_->max_lin_vel, config_->max_ang_vel); - + CLOG(ERROR, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); // Store the result in memory so we can use previous state values to re-initialize and extrapolate the robot pose in subsequent iterations vel_history.erase(vel_history.begin()); vel_history.push_back(saturated_vel); diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp index 26050ce4c..f7dfd470d 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -29,7 +29,7 @@ // Main MPC problem solve function -struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization) +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, std::vector barrier_q_left, std::vector barrier_q_right, int K, double DT, double VF, Eigen::Matrix lat_noise_vect, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix 
kin_noise_vect, bool point_stabilization, double pose_error_weight, double vel_error_weight, double acc_error_weight, double kin_error_weight, double lat_error_weight) { // Conduct an MPC Iteration given current configurations @@ -60,25 +60,20 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Setup shared loss functions and noise models for all cost terms - const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); // The default L2 loss function weights all cost terms with a value of 1.0 + //const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); // The default L2 loss function weights all cost terms with a value of 1.0 (not using this one anymore) - const auto lateralLossFunc = steam::L2WeightedLossFunc::MakeShared(0.01); // The custom L2WeightedLossFunc allows you to dynamically set the weights of cost terms by providing the value as an argument + // The custom L2WeightedLossFunc allows you to dynamically set the weights of cost terms by providing the value as an argument + const auto poseLossFunc = steam::L2WeightedLossFunc::MakeShared(pose_error_weight); + const auto velLossFunc = steam::L2WeightedLossFunc::MakeShared(vel_error_weight); // todo, add a param for this + const auto accelLossFunc = steam::L2WeightedLossFunc::MakeShared(acc_error_weight); + const auto kinLossFunc = steam::L2WeightedLossFunc::MakeShared(kin_error_weight); + const auto latLossFunc = steam::L2WeightedLossFunc::MakeShared(lat_error_weight); - const auto sharedPoseNoiseModel = - steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); - const auto sharedVelNoiseModel = - steam::StaticNoiseModel<2>::MakeShared(vel_noise_vect); - - const auto sharedAccelNoiseModel = - steam::StaticNoiseModel<2>::MakeShared(accel_noise_vect); - - const auto sharedKinNoiseModel = - steam::StaticNoiseModel<6>::MakeShared(kin_noise_vect); - - // Experimental lat constraint (hardcoded for now) - Eigen::Matrix lat_noise_vect; - lat_noise_vect << 100.0; + const auto 
sharedPoseNoiseModel = steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); + const auto sharedVelNoiseModel = steam::StaticNoiseModel<2>::MakeShared(vel_noise_vect); + const auto sharedAccelNoiseModel = steam::StaticNoiseModel<2>::MakeShared(accel_noise_vect); + const auto sharedKinNoiseModel = steam::StaticNoiseModel<6>::MakeShared(kin_noise_vect); const auto sharedLatNoiseModel = steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); @@ -96,17 +91,17 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() vel_states.push_back(v0); - CLOG(ERROR, "mpc.cbit") << "Verifying that the initial robot state is about the same as the initial reference measurement"; - CLOG(ERROR, "mpc.cbit") << "T0_inv: " << T0_inv; - CLOG(ERROR, "mpc.cbit") << "1st meas: " << measurements_cbit[0]; + //CLOG(ERROR, "mpc.cbit") << "Verifying that the initial robot state is about the same as the initial reference measurement"; + //CLOG(ERROR, "mpc.cbit") << "T0_inv: " << T0_inv; + //CLOG(ERROR, "mpc.cbit") << "1st meas: " << measurements_cbit[0]; // Set the remaining states - for (int i=0; i previous_vel, lgmath::se { // Pose Error const auto pose_error_func = steam::se3::SE3ErrorEvaluator::MakeShared(pose_state_vars[i], measurements[i]); - const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, sharedLossFunc); + const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, poseLossFunc); opt_problem.addCostTerm(pose_cost_term); // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) @@ -164,9 +159,13 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto scaled_vel_proj = steam::vspace::ScalarMultEvaluator<6>::MakeShared(vel_proj, DT); const auto rhs = 
steam::se3::ExpMapEvaluator::MakeShared(scaled_vel_proj); const auto kin_error_func = steam::se3::LogMapEvaluator::MakeShared(steam::se3::ComposeInverseEvaluator::MakeShared(lhs, rhs)); - const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, sharedLossFunc); + const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, kinLossFunc); opt_problem.addCostTerm(kin_cost_term); + // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) + const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, velLossFunc); + opt_problem.addCostTerm(vel_cost_term); + // Experimental velocity set-point constraint (instead of non zero velocity penalty) // Only add this cost term if we are not in point stabilization mode (end of path) @@ -182,14 +181,14 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se if (i == 0) { // On the first iteration, we need to use an error with the previously applied control command state - const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i], previous_vel), sharedAccelNoiseModel, sharedLossFunc); + const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i], previous_vel), sharedAccelNoiseModel, accelLossFunc); opt_problem.addCostTerm(accel_cost_term); } else { // Subsequent iterations we make an error between consecutive velocities. 
We penalize large changes in velocity between time steps const auto accel_diff = steam::vspace::AdditionEvaluator<2>::MakeShared(vel_state_vars[i], steam::vspace::NegationEvaluator<2>::MakeShared(vel_state_vars[i-1])); - const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, sharedLossFunc); + const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, accelLossFunc); opt_problem.addCostTerm(accel_cost_term); } @@ -215,15 +214,21 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // DEBUG, for now using a static fixed corridor just to get things working, TODO swap this out with dynamic corridor when stable Eigen::Matrix barrier_right; barrier_right << 0, - -2.5, + barrier_q_right[i], 0, 1; Eigen::Matrix barrier_left; barrier_left << 0.0, - 2.5, + barrier_q_left[i], 0, 1; + //CLOG(WARNING, "mpc.cbit") << "Left Barrier for this meas is: " << barrier_q_left[i]; + //CLOG(WARNING, "mpc.cbit") << "Right Barrier for tis meas is: " << barrier_q_right[i]; + //CLOG(ERROR, "mpc.cbit") << "The Initaial Pose is:" << T0; + //CLOG(WARNING, "mpc.cbit") << "The cbit measurement for this value is: " << measurements_cbit[i].inverse(); + //CLOG(ERROR, "mpc.cbit") << "The vtr measurement for this value is: " << measurements[i].inverse(); + const auto lat_error_right = steam::stereo::HomoPointErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else const auto lat_error_left = steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); @@ -232,36 +237,24 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); // Generate least square cost terms and add them to the optimization problem - const auto lat_cost_term_right = 
steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, lateralLossFunc); + const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, latLossFunc); opt_problem.addCostTerm(lat_cost_term_right); - const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, lateralLossFunc); + const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, latLossFunc); opt_problem.addCostTerm(lat_cost_term_left); //CLOG(WARNING, "debug") << "Running the cbit one"; - } - - - // Experimental Hard Lateral Constraints - /* - CLOG(DEBUG, "debug") << "Attempting to add a lateral cost term"; - const auto inv_state = steam::se3::InverseEvaluator::MakeShared(pose_state_vars[i]); - const auto pose_err = steam::vspace::MatrixMultEvaluator<4,4>::MakeShared(inv_state, measurements[i]); // I think this line is the problem, we cannot really convert the se3 evaluators to vspace evaluators - const auto left_mul = steam::vspace::MatrixMultEvaluator<1,4>::MakeShared(pose_err, I_2_tran); - const auto right_mul = steam::vspace::MatrixMultEvaluator<4,1>::MakeShared(pose_err, I_4); - const auto lat_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(right_mul, sharedLatNoiseModel, sharedLossFunc); - opt_problem.addCostTerm(lat_cost_term); - CLOG(DEBUG, "debug") << "Successfully added Lat cost term"; - */ + + } } // Solve the optimization problem with GuassNewton solver using SolverType = steam::GaussNewtonSolver; // Initialize parameters (enable verbose mode) SolverType::Params params; - params.verbose = true; // Makes the output display for debug when true - params.relative_cost_change_threshold = 0.000001; - //params.max_iterations = 500; - params.absolute_cost_change_threshold = 0.000001; + params.verbose = false; // Makes the output display for debug when true + 
params.relative_cost_change_threshold = 1e-6; + params.max_iterations = 250; + params.absolute_cost_change_threshold = 1e-6; SolverType solver(opt_problem, params); solver.optimize(); @@ -402,7 +395,7 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi p_meas_vec.push_back((i * DT * VF) + p_correction); } //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; - + // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values // Note this could be combined in the previous loop too bool point_stabilization = false; @@ -435,29 +428,93 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi } // For generating VT&R teach path measurements -struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid) +struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid) { // note this was some rapid prototype code written quite quickly for a meeting, need to refactor this longer term to make it faster for longer paths - CLOG(WARNING, "corridor_mpc_debug.cbit") << "Starting to Pre-process global path"; - CLOG(WARNING, "corridor_mpc_debug.cbit") << "The current sid is: " << current_sid; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Starting to Pre-process global path"; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The current sid is: " << current_sid; + + // Initialize vectors storing the barrier values: + std::vector barrier_q_left; + std::vector barrier_q_right; // load the teach path std::vector teach_path = global_path_ptr->disc_path; + //auto teach_p = global_path_ptr->p; std::vector cbit_path; - // Get rid of any of the path before the current sid + // Get rid of any of the path before the current sid // handle case at start of path if (current_sid - 1 < 0) { current_sid = 
1; } + + // Store the global p value of the previous sid. This is the point I use as my zero reference for the remaining p's + double sid_p = global_path_ptr->p[current_sid-1]; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The global reference p is: " << sid_p; + // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot for (int i = (current_sid-1); i < teach_path.size(); i++) { cbit_path.push_back(teach_path[i]); } + //DEBUG: + //CLOG(WARNING, "mpc_debug.cbit") << "The robot is at: x: " << std::get<0>(robot_pose) << " y: " << std::get<1>(robot_pose); + //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 0 x: " << teach_path[0].x << " y: " << teach_path[0].y; + //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 1 x: " << teach_path[1].x << " y: " << teach_path[1].y; + //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 2 x: " << teach_path[2].x << " y: " << teach_path[2].y; + //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 3 x: " << teach_path[3].x << " y: " << teach_path[3].y; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[0] is: " << global_path_ptr->p[0]; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[1] is: " << global_path_ptr->p[1]; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[2] is: " << global_path_ptr->p[2]; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[3] is: " << global_path_ptr->p[3]; + + double x2; + double x1; + double y2; + double y1; + double p_val_ref; + + if (current_sid == 0) + { + x2 = teach_path[1].x; + x1 = teach_path[0].x; + y2 = teach_path[1].y; + y1 =teach_path[0].x; + p_val_ref = global_path_ptr->p[0]; + } + else + { + x2 = teach_path[current_sid+1].x; + x1 = teach_path[current_sid-1].x; + y2 = teach_path[current_sid+1].y; + y1 = teach_path[current_sid-1].y; + p_val_ref = global_path_ptr->p[current_sid-1]; + } + double xp = std::get<0>(robot_pose); + double yp = std::get<1>(robot_pose); + + //double dist1 = sqrt(((xp-x1) * (xp-x1)) + ((yp-y1) * 
(yp-y1))); + //double dist2 = sqrt(((xp-x2) * (xp-x2)) + ((yp-y2) * (yp-y2))); + double total_dist = sqrt(((x2-x1) * (x2-x1)) + ((y2-y1) * (y2-y1))); + + double t = (((xp-x1)*(x2-x1)) + ((yp-y1)*(y2-y1))) / total_dist; + double p_robot = t; + //double cos_angle = (dist2*dist2 + total_dist*total_dist - dist1*dist1) / (2.0*dist2*total_dist); + //double xp_proj = x2 + (x1 - x2) * cos_angle; + //double yp_proj = y2 + (y1 - y2) * cos_angle; + //double p_robot = sqrt(((xp_proj-x1) * (xp_proj-x1)) + (yp_proj-y1) * (yp_proj-y1)) + p_val_ref; + //CLOG(WARNING, "mpc_debug.cbit") << "Projected Pose is x: " << xp_proj<< " y: " << yp_proj; + //CLOG(WARNING, "mpc_debug.cbit") << "The starting robot pose p is: " << p_robot; + + + + //End of Debug + + // Save a copy of the current path solution to work on //auto cbit_path = *cbit_path_ptr; @@ -516,9 +573,10 @@ struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ for (int i = 0; i < K; i++) { - p_meas_vec.push_back((i * DT * VF) + p_correction); + //p_meas_vec.push_back((i * DT * VF) + p_correction); + p_meas_vec.push_back((i * DT * VF) + p_robot); } - //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + //CLOG(WARNING, "debug") << "p_meas_vec is: " << p_meas_vec; // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values // Note this could be combined in the previous loop too @@ -538,15 +596,38 @@ struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ point_stabilization = true; // Enable point stabilization configs CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; } + lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); - CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; // add to measurement vector 
measurements.push_back(meas); + + + + + // Find the corresponding left and right barrier q values to pass to the mpc + + // The corridor_ptr points to the stored barrier values for the entire teach trajectory (0,p_len) + // To find the corresponding values, we just need to query the corridor_ptr given the current sid_p + p_meas_vec[i], and return the q values for that bin + double p_query = sid_p + p_meas_vec[i]; + // this isnt the most efficient way of doing this, but it should be fine, we really only need to run this loop 10-20 times and the size is likely less then 1000 each + int p_ind = 0; + while (corridor_ptr->p_bins[p_ind] <= p_query) + { + p_ind++; + } + barrier_q_left.push_back(corridor_ptr->q_left[p_ind-1]); + barrier_q_right.push_back(corridor_ptr->q_right[p_ind-1]); + //CLOG(WARNING, "debug") << "The left barrier is: " << corridor_ptr->q_left[p_ind-1]; + //CLOG(WARNING, "debug") << "The right barrier is: " << corridor_ptr->q_right[p_ind-1]; + + + } - return {measurements, point_stabilization}; + return {measurements, point_stabilization, barrier_q_left, barrier_q_right}; } diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp index c7018b864..39d5c2b55 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp @@ -111,6 +111,12 @@ class CBIT : public BasePathPlanner { Eigen::Matrix kin_error_cov = Eigen::Matrix::Zero(); Eigen::Matrix lat_error_cov = Eigen::Matrix::Zero(); + // MPC weight params: + double pose_error_weight = 1.0; + double vel_error_weight = 1.0; + double acc_error_weight = 1.0; + double kin_error_weight = 1.0; + double lat_error_weight = 0.01; // Misc int command_history_length = 100; diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp 
b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index e1e84d5ea..980681d76 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -137,5 +137,5 @@ class CBITPlanner { bool bool_result; Node col_node; }; - struct collision_result discrete_collision_v2(double discretization, Node start, Node end); + struct collision_result discrete_collision_v2(double discretization, Node start, Node end, bool tight = false); }; \ No newline at end of file diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp index 849b0f31d..ce84a3571 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp @@ -34,16 +34,24 @@ struct meas_result bool point_stabilization; }; +struct meas_result3 +{ + std::vector measurements; + bool point_stabilization; + std::vector barrier_q_left; + std::vector barrier_q_right; +}; + // Declaring helper functions // Primary optimization function: Takes in the input configurations and the extrapolated robot pose, outputs a vector for the velocity to apply and the predicted horizon -struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization); +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, std::vector barrier_q_left, std::vector barrier_q_right, int K, double DT, double VF, Eigen::Matrix lat_noise_vect, Eigen::Matrix 
pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization, double pose_error_weight, double vel_error_weight, double acc_error_weight, double kin_error_weight, double lat_error_weight); // Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); // Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity -struct meas_result GenerateReferenceMeas3(std::shared_ptr cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid); +struct meas_result3 GenerateReferenceMeas3(std::shared_ptr cbit_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid); // Helper function for post-processing and saturating the velocity command diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 79f8fe94a..670b2ba96 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -81,7 +81,7 @@ auto CBIT::Config::fromROS(const rclcpp::Node::SharedPtr& node, const std::strin config->max_lin_vel = node->declare_parameter(prefix + ".mpc.max_lin_vel", config->max_lin_vel); config->max_ang_vel = node->declare_parameter(prefix + ".mpc.max_ang_vel", config->max_ang_vel); - // COST FUNCTION WEIGHTS + // COST FUNCTION COVARIANCE const auto pose_error_diag = node->declare_parameter>(prefix + ".mpc.pose_error_cov", std::vector()); config->pose_error_cov.diagonal() << pose_error_diag[0], pose_error_diag[1], pose_error_diag[2], pose_error_diag[3], pose_error_diag[4], pose_error_diag[5]; @@ -96,6 +96,14 @@ auto CBIT::Config::fromROS(const rclcpp::Node::SharedPtr& 
node, const std::strin const auto lat_error_diag = node->declare_parameter>(prefix + ".mpc.lat_error_cov", std::vector()); config->lat_error_cov.diagonal() << lat_error_diag[0]; + + // COST FUNCTION WEIGHTS + config->pose_error_weight = node->declare_parameter(prefix + ".mpc.pose_error_weight", config->pose_error_weight); + config->vel_error_weight = node->declare_parameter(prefix + ".mpc.vel_error_weight", config->vel_error_weight); + config->acc_error_weight = node->declare_parameter(prefix + ".mpc.acc_error_weight", config->acc_error_weight); + config->kin_error_weight = node->declare_parameter(prefix + ".mpc.kin_error_weight", config->kin_error_weight); + config->lat_error_weight = node->declare_parameter(prefix + ".mpc.lat_error_weight", config->lat_error_weight); + // MISC config->command_history_length = node->declare_parameter(prefix + ".mpc.command_history_length", config->command_history_length); @@ -293,7 +301,8 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { // retrieve the transorm info from the localization chain const auto chain_info = getChainInfo(robot_state); auto [stamp, w_p_r_in_r, T_p_r, T_w_p, T_w_v_odo, T_r_v_odo, curr_sid] = chain_info; - + CLOG(INFO, "path_planning.cbit") << "The T_r_v_odo is: " << T_r_v_odo; + CLOG(INFO, "path_planning.cbit") << "The T_p_r is: " << T_p_r; // Extrapolate the pose of the robot into the future based on the localization delay prev_stamp = stamp; const auto curr_time = now(); // always in nanoseconds @@ -339,6 +348,13 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { Eigen::Matrix lat_noise_vect; lat_noise_vect = config_->lat_error_cov; + // Cost term weights + double pose_error_weight = config_->pose_error_weight; + double vel_error_weight = config_->vel_error_weight; + double acc_error_weight = config_->acc_error_weight; + double kin_error_weight = config_->kin_error_weight; + double lat_error_weight = config_->lat_error_weight; + // Extrapolating robot pose into the future 
by using the history of applied mpc velocity commands @@ -388,10 +404,10 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp); // Uncomment for using the mpc extrapolated robot pose for control - lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp2); + //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp2); // no extrapolation (comment this out if we are not using extrapolation) - //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); + lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); // TODO: Set whether to use mpc extrapolation as a config param (though there is almost never a good reason not to use it) @@ -415,9 +431,11 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { // Experimental, corridor MPC reference measurement generation: CLOG(WARNING, "mpc.cbit") << "Attempting to generate T_ref measurements"; - auto meas_result3 = GenerateReferenceMeas3(global_path_ptr, robot_pose, K, DT, VF, curr_sid); + auto meas_result3 = GenerateReferenceMeas3(global_path_ptr, corridor_ptr, robot_pose, K, DT, VF, curr_sid); auto measurements3 = meas_result3.measurements; bool point_stabilization3 = meas_result3.point_stabilization; + std::vector barrier_q_left = meas_result3.barrier_q_left; + std::vector barrier_q_right = meas_result3.barrier_q_right; // END of experimental code @@ -426,7 +444,9 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; - auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); + auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, 
vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + //auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + //auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); // Tracking controller version applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here mpc_poses = mpc_result.mpc_poses; CLOG(INFO, "mpc.cbit") << "Successfully solved MPC problem"; @@ -443,7 +463,7 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { // If required, saturate the output velocity commands based on the configuration limits CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; Eigen::Matrix saturated_vel = SaturateVel2(applied_vel, config_->max_lin_vel, config_->max_ang_vel); - + CLOG(ERROR, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); // Store the result in memory so we can use previous state values to re-initialize and extrapolate the robot pose in subsequent iterations vel_history.erase(vel_history.begin()); vel_history.push_back(saturated_vel); diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index 495074a72..bd0b0b9a8 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -530,7 +530,8 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Store the Euclidean solution in the shared pointer memory (vector of 
Pose classes) so it can be accessed in the CBIT class //std::cout << "Made it just before extracting euclid path" << std::endl; std::vector euclid_path = ExtractEuclidPath(); - *cbit_path_ptr = euclid_path; + //*cbit_path_ptr = euclid_path; // previously I update the cbit path ptr right away, but I think that because there is quite abit of delay in the corridor update process, I should wait to update them at the same time + // This helps prevent synchronization issues // Reset the start time start_time = std::chrono::high_resolution_clock::now(); @@ -630,11 +631,15 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // EXPERIMENTAL: Updating the dynamic corridor (now that we know our path is collision free): - //update_corridor(corridor_ptr, path_x, path_y, *p_goal); - - + //auto corridor_start_time = std::chrono::high_resolution_clock::now(); + update_corridor(corridor_ptr, path_x, path_y, *p_goal); + //auto corridor_stop_time = std::chrono::high_resolution_clock::now(); + //auto duration_corridor = std::chrono::duration_cast(corridor_stop_time - corridor_start_time); + //CLOG(ERROR, "path_planning.cbit_planner") << "Corridor Update Time: " << duration_corridor.count() << "us"; } + *cbit_path_ptr = euclid_path; // Update the pointer to point to the latest euclidean path update + } //std::cout << "Made it just before prune" << std::endl; Prune(p_goal->g_T, p_goal->g_T_weighted); @@ -1093,8 +1098,8 @@ std::vector> CBITPlanner::SampleFreeSpace(int m) //int pre_seeds = abs(p_goal->p - p_start->p) / 0.25; int pre_seeds = abs(p_zero - p_start->p) / 0.25; // Note needed to change p_goal to p_zero. 
When the sliding window padding is large, pre-seeds wont get generated all the way to the goal - std::cout << "generating pre-seeds - number is:" << pre_seeds << std::endl; - std::cout << "The size of the tree is:" << tree.V.size() << std::endl; + //std::cout << "generating pre-seeds - number is:" << pre_seeds << std::endl; + //std::cout << "The size of the tree is:" << tree.V.size() << std::endl; // In the python version I do this line thing which is more robust, but for now im going to do this quick and dirty double p_step = 0.25; @@ -2104,39 +2109,39 @@ bool CBITPlanner::costmap_col_tight(Node node) // I am no longer temporally filtering here, instead this takes place in the costmap itself in the change detection module // This means the costmap and transform size should never be more than 1 - for (int i = 0; i < cbit_costmap_ptr->T_c_w_vect.size(); i++) - { + //for (int i = 0; i < cbit_costmap_ptr->T_c_w_vect.size(); i++) + //{ - auto collision_pt = cbit_costmap_ptr->T_c_w_vect[i] * test_pt; + auto collision_pt = cbit_costmap_ptr->T_c_w * test_pt; - // Round the collision point x and y values down to the nearest grid resolution so that it can be found in the obstacle unordered_map - float x_key = floor(collision_pt[0] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; - float y_key = floor(collision_pt[1] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; + // Round the collision point x and y values down to the nearest grid resolution so that it can be found in the obstacle unordered_map + float x_key = floor(collision_pt[0] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; + float y_key = floor(collision_pt[1] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; - //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; - //CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; + //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; + 
//CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; - float grid_value; + float grid_value; - // Check to see if the point is in the obstacle map - // We need to use a try/catch in this metod as if the key value pair doesnt exist (it usually wont) we catch an out of range error - try - { - // Block of code to try - grid_value = cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key, y_key)); - //CLOG(ERROR, "path_planning.cbit_planner") << "Key Value: " << grid_value; - } - catch (std::out_of_range) - { - grid_value = 0.0; - } + // Check to see if the point is in the obstacle map + // We need to use a try/catch in this metod as if the key value pair doesnt exist (it usually wont) we catch an out of range error + try + { + // Block of code to try + grid_value = cbit_costmap_ptr->obs_map.at(std::pair (x_key, y_key)); + //CLOG(ERROR, "path_planning.cbit_planner") << "Key Value: " << grid_value; + } + catch (std::out_of_range) + { + grid_value = 0.0; + } - if (grid_value >= 0.89) // By switching this from > 0.0 to 0.9, we effectively only collision check the path out to the "minimum_distance" obs config param - { - return true; - } + if (grid_value >= 0.89) // By switching this from > 0.0 to 0.9, we effectively only collision check the path out to the "minimum_distance" obs config param + { + return true; } + //} // If we make it here can return false for no collision return false; @@ -2149,47 +2154,48 @@ bool CBITPlanner::costmap_col(Node node) { //CLOG(DEBUG, "path_planning.cbit_planner") << "Original Node: x: " << node.p << " y: " << node.q << " z: " << node.z; Eigen::Matrix test_pt({node.p, node.q, node.z, 1}); + // I am no longer temporally filtering here, instead this takes place in the costmap itself in the change detection module + // This means the costmap and transform size should never be more than 1 + //for (int i = 0; i < cbit_costmap_ptr->T_c_w_vect.size(); i++) + //{ - for (int i = 0; i < cbit_costmap_ptr->T_c_w_vect.size(); i++) - { + 
//auto collision_pt = cbit_costmap_ptr->T_c_w * test_pt; + auto collision_pt = cbit_costmap_ptr->T_c_w * test_pt; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Displaying the point in the costmap frame we are trying to collision check: " << collision_pt; + //CLOG(DEBUG, "path_planning.cbit_planner") << "X: " << collision_pt[0]; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Y: " << collision_pt[1]; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Resolution: " << cbit_costmap_ptr->grid_resolution; - //auto collision_pt = cbit_costmap_ptr->T_c_w * test_pt; - auto collision_pt = cbit_costmap_ptr->T_c_w_vect[i] * test_pt; - //CLOG(DEBUG, "path_planning.cbit_planner") << "Displaying the point in the costmap frame we are trying to collision check: " << collision_pt; - //CLOG(DEBUG, "path_planning.cbit_planner") << "X: " << collision_pt[0]; - //CLOG(DEBUG, "path_planning.cbit_planner") << "Y: " << collision_pt[1]; - //CLOG(DEBUG, "path_planning.cbit_planner") << "Resolution: " << cbit_costmap_ptr->grid_resolution; + // Round the collision point x and y values down to the nearest grid resolution so that it can be found in the obstacle unordered_map + float x_key = floor(collision_pt[0] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; + float y_key = floor(collision_pt[1] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; + + //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; - // Round the collision point x and y values down to the nearest grid resolution so that it can be found in the obstacle unordered_map - float x_key = floor(collision_pt[0] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; - float y_key = floor(collision_pt[1] / cbit_costmap_ptr->grid_resolution) * cbit_costmap_ptr->grid_resolution; + float grid_value; - //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; - //CLOG(DEBUG, 
"path_planning.cbit_planner") << "Y_key: " << y_key; + // Check to see if the point is in the obstacle map + // If it isnt in the map we will catch an out of range error + try + { + grid_value = cbit_costmap_ptr->obs_map.at(std::pair (x_key, y_key)); + //CLOG(DEBUG, "debug") << " The Grid value for this cell is: " << grid_value; + } + + catch (std::out_of_range) + { - float grid_value; + grid_value = 0.0; + } - // Check to see if the point is in the obstacle map - // If it isnt in the map we will catch an out of range error - try - { - grid_value = cbit_costmap_ptr->obs_map_vect[i].at(std::pair (x_key, y_key)); - //CLOG(DEBUG, "debug") << " The Grid value for this cell is: " << grid_value; - } - - catch (std::out_of_range) + if (grid_value > 0.0) { - - grid_value = 0.0; + return true; } - if (grid_value > 0.0) - { - return true; - } - - } + //} // If we make it here, return false for no collision return false; } @@ -2246,7 +2252,9 @@ bool CBITPlanner::discrete_collision(std::vector> obs, doubl // The only thing keeping me from doing this right away is that all the curvilinear conversions and collision checkers are here // But long term I should move all those over to utils or even a different header and script -struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double discretization, Node start, Node end) +// Old code I wrote for this function +/* +struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double discretization, Node start, Node end, bool tight) { // We dynamically determine the discretization based on the length of the edge discretization = round(calc_dist(start, end) * discretization); @@ -2281,18 +2289,89 @@ struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double d //Node euclid_pt = curv_pt; // DEBUG DO NOT LEAVE THIS HERE, NEED TO REPLACE WITH COLLISION CHECK FUNCTION Node euclid_pt = curve_to_euclid(curv_pt); //if (is_inside_obs(obs, euclid_pt)) - if (costmap_col(euclid_pt)) + if (tight 
== false) { + if (costmap_col(euclid_pt)) + { return {true, curv_pt}; + } } + else + { + if (costmap_col_tight(euclid_pt)) + { + return {true, curv_pt}; + } + } + + } + + return {false, curv_pt}; +} +*/ + +// ChatGPT optimized code for this function thats 10-20x faster lol +struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double discretization, Node start, Node end, bool tight) +{ + // We dynamically determine the discretization based on the length of the edge + discretization = round(calc_dist(start, end) * discretization); + + // Generate discretized test nodes + std::vector p_test; + std::vector q_test; + + double p_diff = end.p - start.p; + double q_diff = end.q - start.q; + double p_sign = sgn(p_diff); + double q_sign = sgn(q_diff); + double p_step = p_sign * fabs(p_diff) / discretization; + double q_step = q_sign * fabs(q_diff) / discretization; + + p_test.push_back(start.p); + q_test.push_back(start.q); + + for (auto it = p_test.begin(), jt = q_test.begin(); it != p_test.end() - 1; ++it, ++jt) + { + *std::next(it) = *it + p_step * p_sign; + *std::next(jt) = *jt + q_step * q_sign; } + p_test.push_back(end.p); + q_test.push_back(end.q); + // Loop through the test curvilinear points, convert to euclid, collision check obstacles + Node curv_pt; + for (auto it = p_test.begin(), jt = q_test.begin(); it != p_test.end(); ++it, ++jt) + { + curv_pt = Node(*it, *jt); + + // Convert to euclid TODO: + //Node euclid_pt = curv_pt; // DEBUG DO NOT LEAVE THIS HERE, NEED TO REPLACE WITH COLLISION CHECK FUNCTION + Node euclid_pt = curve_to_euclid(curv_pt); + //if (is_inside_obs(obs, euclid_pt)) + if (tight == false) + { + if (costmap_col(euclid_pt)) + { + return {true, curv_pt}; + } + } + else + { + if (costmap_col_tight(euclid_pt)) + { + return {true, curv_pt}; + } + } + } return {false, curv_pt}; } + + // dont love how this function is coded, could be done much more efficiently with some more effort I think void 
CBITPlanner::update_corridor(std::shared_ptr corridor, std::vector homotopy_p, std::vector homotopy_q, Node robot_state) { + //auto corridor_start_time = std::chrono::high_resolution_clock::now(); // Reset q_left and q_right (I did this on the python side, but I dont know why exactly, in theory I should be able to just // update it incrementally in a sliding window? I must have had a reason for it but ill leave it out for now). // Ah the reason i did this was because I was not handling the no collision case (in which we reset q_left to q_right for that bin to the max) @@ -2324,6 +2403,7 @@ void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v double q_lower; for (int i = 0; i < p_bins_subset.size(); i++) { + int ind_counter = 0; // iterate through the current path solution (in p,q space) @@ -2352,19 +2432,26 @@ void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v // Note long term we need to handle special case when the start point is in a wormhole region, but for now should be fine //Node euclid_start = CBITPlanner::curve_to_euclid(start); - // debug, testing to see how often the bit* point is in a collision by the time we get here (in theory this should never happen but I think it is) - auto test_check = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, start); - if (test_check.bool_result == true) + //auto test_check = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, start); + //if (test_check.bool_result == true) + //{ + // CLOG(ERROR, "path_planning.corridor_debug") << "Something has gone wrong, cbit path has a collision, ignoring this p_bin update"; + // continue; + //} + + Node euclid_pt = curve_to_euclid(start); + if (costmap_col_tight(euclid_pt)) { - CLOG(WARNING, "path_planning.corridor_debug") << "Something has gone wrong, cbit path has a collision, ignoring this p_bin update"; - break; + //CLOG(ERROR, "path_planning.corridor_debug") << "Something has gone wrong, cbit path has a 
collision, ignoring this p_bin update"; + continue; } + // collision check left and right using a special version of discrete_collision check // In this version we output both a boolean and the 1st point that comes into collision if there is one - auto collision_check_result1 = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, end_left); - auto collision_check_result2 = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, end_right); + auto collision_check_result1 = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, end_left, true); + auto collision_check_result2 = discrete_collision_v2(corridor->curv_to_euclid_discretization, start, end_right, true); // if there is a collision, set q_left at the location of the current p_bin being processed to the value of q_left/q_right if (collision_check_result1.bool_result == true) @@ -2421,38 +2508,38 @@ void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v } - // Updating the full euclidean corridor vectors by iterating through all bins. + // Updating the full euclidean corridor vectors by iterating through all bins. - // TODO: Make whether we do this a configurable parameter, I realised that I dont actually need this from the control perspective at all - // Its purely for visualization purposes and it will waste some compute (not much though so im not too concerned for everyday use) + // TODO: Make whether we do this a configurable parameter, I realised that I dont actually need this from the control perspective at all + // Its purely for visualization purposes and it will waste some compute (not much though so im not too concerned for everyday use) - // Note for convenience I can do this in a separate loop, but really we could also be doing this incrementally the same way we update - // q_left/q_right. 
This requires a proper initialization of the euclid corridor in generate_pq.cpp, which is certainly possible - // But not currently because the curve_to_euclid is not in the utils package. When I change that we can do this. Until then, brute force it is. + // Note for convenience I can do this in a separate loop, but really we could also be doing this incrementally the same way we update + // q_left/q_right. This requires a proper initialization of the euclid corridor in generate_pq.cpp, which is certainly possible + // But not currently because the curve_to_euclid is not in the utils package. When I change that we can do this. Until then, brute force it is. - // Benchmarking the compute time for this operation since its likely much less efficient then it could be with the incremental approach - // Need to first clear out the old corridor, otherwise it just keeps stacking - corridor->x_left.clear(); - corridor->y_left.clear(); - corridor->x_right.clear(); - corridor->y_right.clear(); - //auto corridor_start_time = std::chrono::high_resolution_clock::now(); - for (int i = 0; i < corridor->p_bins.size(); i++) - { - Node euclid_left = curve_to_euclid(Node(corridor->p_bins[i],corridor->q_left[i])); - Node euclid_right = curve_to_euclid(Node(corridor->p_bins[i],corridor->q_right[i])); - - //CLOG(ERROR, "path_planning.corridor_debug") << "Euclid Left is x: " << euclid_left.p << " y: " << euclid_right.p; - corridor->x_left.push_back(euclid_left.p); - corridor->y_left.push_back(euclid_left.q); - corridor->x_right.push_back(euclid_right.p); - corridor->y_right.push_back(euclid_right.q); - } + // Benchmarking the compute time for this operation since its likely much less efficient then it could be with the incremental approach + // Need to first clear out the old corridor, otherwise it just keeps stacking + corridor->x_left.clear(); + corridor->y_left.clear(); + corridor->x_right.clear(); + corridor->y_right.clear(); + //auto corridor_start_time = 
std::chrono::high_resolution_clock::now(); + for (int i = 0; i < corridor->p_bins.size(); i++) + { + Node euclid_left = curve_to_euclid(Node(corridor->p_bins[i],corridor->q_left[i])); + Node euclid_right = curve_to_euclid(Node(corridor->p_bins[i],corridor->q_right[i])); + + //CLOG(ERROR, "path_planning.corridor_debug") << "Euclid Left is x: " << euclid_left.p << " y: " << euclid_right.p; + corridor->x_left.push_back(euclid_left.p); + corridor->y_left.push_back(euclid_left.q); + corridor->x_right.push_back(euclid_right.p); + corridor->y_right.push_back(euclid_right.q); + } - //auto corridor_stop_time = std::chrono::high_resolution_clock::now(); - //auto duration_corridor = std::chrono::duration_cast(corridor_stop_time - corridor_start_time); - //CLOG(ERROR, "path_planning.corridor_debug") << "Corridor Update Time: " << duration_corridor.count() << "ms"; - + //auto corridor_stop_time = std::chrono::high_resolution_clock::now(); + //auto duration_corridor = std::chrono::duration_cast(corridor_stop_time - corridor_start_time); + //CLOG(ERROR, "path_planning.corridor_debug") << "Corridor Update Time: " << duration_corridor.count() << "ms"; + } diff --git a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp index 30eafa590..952418140 100644 --- a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp @@ -29,7 +29,7 @@ // Main MPC problem solve function -struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, int K, double DT, double VF, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization) +struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, std::vector barrier_q_left, 
std::vector barrier_q_right, int K, double DT, double VF, Eigen::Matrix lat_noise_vect, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization, double pose_error_weight, double vel_error_weight, double acc_error_weight, double kin_error_weight, double lat_error_weight) { // Conduct an MPC Iteration given current configurations @@ -60,25 +60,20 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Setup shared loss functions and noise models for all cost terms - const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); + //const auto sharedLossFunc = steam::L2LossFunc::MakeShared(); // The default L2 loss function weights all cost terms with a value of 1.0 (not using this one anymore) - const auto lateralLossFunc = steam::L2WeightedLossFunc::MakeShared(0.01); + // The custom L2WeightedLossFunc allows you to dynamically set the weights of cost terms by providing the value as an argument + const auto poseLossFunc = steam::L2WeightedLossFunc::MakeShared(pose_error_weight); + const auto velLossFunc = steam::L2WeightedLossFunc::MakeShared(vel_error_weight); // todo, add a param for this + const auto accelLossFunc = steam::L2WeightedLossFunc::MakeShared(acc_error_weight); + const auto kinLossFunc = steam::L2WeightedLossFunc::MakeShared(kin_error_weight); + const auto latLossFunc = steam::L2WeightedLossFunc::MakeShared(lat_error_weight); - const auto sharedPoseNoiseModel = - steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); - const auto sharedVelNoiseModel = - steam::StaticNoiseModel<2>::MakeShared(vel_noise_vect); - - const auto sharedAccelNoiseModel = - steam::StaticNoiseModel<2>::MakeShared(accel_noise_vect); - - const auto sharedKinNoiseModel = - steam::StaticNoiseModel<6>::MakeShared(kin_noise_vect); - - // Experimental lat constraint (hardcoded for now) - Eigen::Matrix lat_noise_vect; - lat_noise_vect << 100.0; + const auto sharedPoseNoiseModel = 
steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); + const auto sharedVelNoiseModel = steam::StaticNoiseModel<2>::MakeShared(vel_noise_vect); + const auto sharedAccelNoiseModel = steam::StaticNoiseModel<2>::MakeShared(accel_noise_vect); + const auto sharedKinNoiseModel = steam::StaticNoiseModel<6>::MakeShared(kin_noise_vect); const auto sharedLatNoiseModel = steam::StaticNoiseModel<1>::MakeShared(lat_noise_vect); @@ -96,11 +91,17 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() vel_states.push_back(v0); + //CLOG(ERROR, "mpc.cbit") << "Verifying that the initial robot state is about the same as the initial reference measurement"; + //CLOG(ERROR, "mpc.cbit") << "T0_inv: " << T0_inv; + //CLOG(ERROR, "mpc.cbit") << "1st meas: " << measurements_cbit[0]; + + // Set the remaining states for (int i=0; i previous_vel, lgmath::se for (int i = 0; i < K; i++) { - pose_state_vars.push_back(steam::se3::SE3StateVar::MakeShared(pose_states[i])); + pose_state_vars.push_back(steam::se3::SE3StateVar::MakeShared(pose_states[i])); vel_state_vars.push_back(steam::vspace::VSpaceStateVar<2>::MakeShared(vel_states[i])); } @@ -141,7 +142,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se { // Pose Error const auto pose_error_func = steam::se3::SE3ErrorEvaluator::MakeShared(pose_state_vars[i], measurements[i]); - const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, sharedLossFunc); + const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, poseLossFunc); opt_problem.addCostTerm(pose_cost_term); // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) @@ -158,9 +159,13 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const 
auto scaled_vel_proj = steam::vspace::ScalarMultEvaluator<6>::MakeShared(vel_proj, DT); const auto rhs = steam::se3::ExpMapEvaluator::MakeShared(scaled_vel_proj); const auto kin_error_func = steam::se3::LogMapEvaluator::MakeShared(steam::se3::ComposeInverseEvaluator::MakeShared(lhs, rhs)); - const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, sharedLossFunc); + const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, kinLossFunc); opt_problem.addCostTerm(kin_cost_term); + // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) + const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, velLossFunc); + opt_problem.addCostTerm(vel_cost_term); + // Experimental velocity set-point constraint (instead of non zero velocity penalty) // Only add this cost term if we are not in point stabilization mode (end of path) @@ -176,19 +181,20 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se if (i == 0) { // On the first iteration, we need to use an error with the previously applied control command state - const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i], previous_vel), sharedAccelNoiseModel, sharedLossFunc); + const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i], previous_vel), sharedAccelNoiseModel, accelLossFunc); opt_problem.addCostTerm(accel_cost_term); } else { // Subsequent iterations we make an error between consecutive velocities. 
We penalize large changes in velocity between time steps const auto accel_diff = steam::vspace::AdditionEvaluator<2>::MakeShared(vel_state_vars[i], steam::vspace::NegationEvaluator<2>::MakeShared(vel_state_vars[i-1])); - const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, sharedLossFunc); + const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, accelLossFunc); opt_problem.addCostTerm(accel_cost_term); } } + // Laterial Barrier State Constraints if (i > 0) { @@ -226,33 +232,20 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); // Generate least square cost terms and add them to the optimization problem - const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, lateralLossFunc); - //opt_problem.addCostTerm(lat_cost_term_right); - const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, lateralLossFunc); - //opt_problem.addCostTerm(lat_cost_term_left); + const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, latLossFunc); + opt_problem.addCostTerm(lat_cost_term_right); + const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, latLossFunc); + opt_problem.addCostTerm(lat_cost_term_left); //CLOG(WARNING, "debug") << "Running the cbit one"; } - - - - // Experimental Hard Lateral Constraints - /* - CLOG(DEBUG, "debug") << "Attempting to add a lateral cost term"; - const auto inv_state = steam::se3::InverseEvaluator::MakeShared(pose_state_vars[i]); - const auto pose_err = steam::vspace::MatrixMultEvaluator<4,4>::MakeShared(inv_state, measurements[i]); // I think this line is the problem, we cannot 
really convert the se3 evaluators to vspace evaluators - const auto left_mul = steam::vspace::MatrixMultEvaluator<1,4>::MakeShared(pose_err, I_2_tran); - const auto right_mul = steam::vspace::MatrixMultEvaluator<4,1>::MakeShared(pose_err, I_4); - const auto lat_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(right_mul, sharedLatNoiseModel, sharedLossFunc); - opt_problem.addCostTerm(lat_cost_term); - CLOG(DEBUG, "debug") << "Successfully added Lat cost term"; - */ + } // Solve the optimization problem with GuassNewton solver using SolverType = steam::GaussNewtonSolver; // Initialize parameters (enable verbose mode) SolverType::Params params; - params.verbose = true; // Makes the output display for debug when true + params.verbose = false; // Makes the output display for debug when true params.relative_cost_change_threshold = 0.000001; //params.max_iterations = 500; params.absolute_cost_change_threshold = 0.000001; @@ -429,23 +422,33 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi } // For generating VT&R teach path measurements -struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid) +struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid) { // note this was some rapid prototype code written quite quickly for a meeting, need to refactor this longer term to make it faster for longer paths - CLOG(WARNING, "corridor_mpc_debug.cbit") << "Starting to Pre-process global path"; - CLOG(WARNING, "corridor_mpc_debug.cbit") << "The current sid is: " << current_sid; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Starting to Pre-process global path"; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The current sid is: " << current_sid; + + // Initialize vectors storing the barrier values: + std::vector barrier_q_left; + std::vector 
barrier_q_right; // load the teach path std::vector teach_path = global_path_ptr->disc_path; + //auto teach_p = global_path_ptr->p; std::vector cbit_path; - // Get rid of any of the path before the current sid + // Get rid of any of the path before the current sid // handle case at start of path if (current_sid - 1 < 0) { current_sid = 1; } + + // Store the global p value of the previous sid. This is the point I use as my zero reference for the remaining p's + double sid_p = global_path_ptr->p[current_sid-1]; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The global reference p is: " << sid_p; + // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot for (int i = (current_sid-1); i < teach_path.size(); i++) { @@ -512,7 +515,7 @@ struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ p_meas_vec.push_back((i * DT * VF) + p_correction); } - //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + //CLOG(WARNING, "debug") << "p_meas_vec is: " << p_meas_vec; // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values // Note this could be combined in the previous loop too @@ -533,14 +536,36 @@ struct meas_result GenerateReferenceMeas3(std::shared_ptr global_path_ CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; } lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); - CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; // add to measurement vector measurements.push_back(meas); + + + + + // Find the corresponding left and right barrier q values to pass to the mpc + + // The corridor_ptr points to the stored barrier values for the entire teach trajectory (0,p_len) + // To find the corresponding values, we just need 
to query the corridor_ptr given the current sid_p + p_meas_vec[i], and return the q values for that bin + double p_query = sid_p + p_meas_vec[i]; + // this isnt the most efficient way of doing this, but it should be fine, we really only need to run this loop 10-20 times and the size is likely less then 1000 each + int p_ind = 0; + while (corridor_ptr->p_bins[p_ind] <= p_query) + { + p_ind++; + } + barrier_q_left.push_back(corridor_ptr->q_left[p_ind-1]); + barrier_q_right.push_back(corridor_ptr->q_right[p_ind-1]); + //CLOG(WARNING, "debug") << "The left barrier is: " << corridor_ptr->q_left[p_ind-1]; + //CLOG(WARNING, "debug") << "The right barrier is: " << corridor_ptr->q_right[p_ind-1]; + + + } - return {measurements, point_stabilization}; + return {measurements, point_stabilization, barrier_q_left, barrier_q_right}; } @@ -657,34 +682,3 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel } - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/rviz/honeycomb.rviz b/rviz/honeycomb.rviz index b14ad2ee1..872da4ab7 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -140,7 +140,7 @@ Visualization Manager: robot planning (extrapolated): Value: false robot planning (extrapolated) mpc: - Value: true + Value: false sensor_anchor_link: Value: false velodyne: @@ -1217,7 +1217,7 @@ Visualization Manager: Buffer Length: 1 Class: rviz_default_plugins/Path Color: 0; 0; 0 - Enabled: false + Enabled: true Head Diameter: 0.30000001192092896 Head Length: 0.20000000298023224 Length: 0.30000001192092896 @@ -1240,7 +1240,7 @@ Visualization Manager: History Policy: Keep Last Reliability Policy: Reliable Value: /vtr/robot_path - Value: false + Value: true - Alpha: 0.699999988079071 Class: rviz_default_plugins/Map Color Scheme: costmap @@ -1266,7 +1266,7 @@ Visualization Manager: Class: rviz_default_plugins/Map Color Scheme: raw Draw Behind: false - Enabled: true + Enabled: false Name: filtered_obstacle_map Topic: Depth: 5 @@ 
-1282,7 +1282,7 @@ Visualization Manager: Reliability Policy: Reliable Value: /vtr/filtered_change_detection_costmap_updates Use Timestamp: false - Value: true + Value: false - Alpha: 1 Buffer Length: 1 Class: rviz_default_plugins/Path @@ -1385,25 +1385,25 @@ Visualization Manager: Views: Current: Class: rviz_default_plugins/Orbit - Distance: 19.4349422454834 + Distance: 26.911510467529297 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: - X: 0.722629964351654 - Y: 0.6375535130500793 - Z: -0.5986025333404541 + X: 0.1747203767299652 + Y: 0.14160969853401184 + Z: -0.6141221523284912 Focal Shape Fixed Size: false Focal Shape Size: 0.05000000074505806 Invert Z Axis: false Name: Current View Near Clip Distance: 0.009999999776482582 - Pitch: 1.3647969961166382 + Pitch: 1.5697963237762451 Target Frame: robot Value: Orbit (rviz_default_plugins) - Yaw: 4.070685863494873 + Yaw: 3.225677490234375 Saved: - Class: rviz_default_plugins/Orbit Distance: 111.39698028564453 @@ -1607,11 +1607,11 @@ Visualization Manager: Yaw: 2.295583963394165 Window Geometry: Displays: - collapsed: false + collapsed: true Height: 1016 - Hide Left Dock: false + Hide Left Dock: true Hide Right Dock: true - QMainWindow State: 
000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000004b1000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 
000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073000000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d0065010000000000000450000000000000000000000738000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: From 7877004d9fb25dcec6e91289147f2841a5e6b978 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Mon, 10 Apr 2023 11:27:12 -0400 Subject: [PATCH 16/25] Updates for low level controller calibration experiment --- config/honeycomb_grizzly_default.yaml | 3 +- launch/online_honeycomb_grizzly.launch.yaml | 4 +- .../modules/odometry/odometry_icp_module.cpp | 7 + main/src/vtr_lidar/src/path_planning/cbit.cpp | 12 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 12 +- 
.../src/cbit/cbit_path_planner.cpp | 6 +- .../urdf/accessories/honeycomb.urdf.xacro | 4 +- rviz/honeycomb.rviz | 138 +++++++++++++----- 8 files changed, 139 insertions(+), 47 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 99cddc551..b9c6d7d17 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -46,6 +46,7 @@ # "lidar.ground_extraction", # "lidar.obstacle_detection", # "lidar.terrain_assessment", + "grizzly_controller_tests.cbit" ] # control frame from the urdf @@ -409,7 +410,7 @@ ############ path planning configuration ############ path_planning: type: cbit # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version - control_period: 100 # ms + control_period: 50 # ms teb: # vtr specific visualize: true diff --git a/launch/online_honeycomb_grizzly.launch.yaml b/launch/online_honeycomb_grizzly.launch.yaml index 95088af6f..5075efe5f 100644 --- a/launch/online_honeycomb_grizzly.launch.yaml +++ b/launch/online_honeycomb_grizzly.launch.yaml @@ -8,7 +8,7 @@ environment: # data_dir:=${VTRDATA}/online-test-lidar/2022-09-08/2022-09-08 # data_dir:=${VTRDATA}/online-test-lidar/2023-01-18/2023-01-18 # Nice dome path # data_dir:=${VTRDATA}/online-test-lidar/2023-02-20/2023-02-20 # Nice demo path from utias bay - + # data_dir:=${VTRDATA}/online-test-lidar/2023-03-28/2023-03-28 start_directory: ${VTRDATA} suppress_history: false @@ -22,7 +22,7 @@ windows: - > ros2 launch vtr_navigation vtr.launch.py base_params:=honeycomb_grizzly_default.yaml - data_dir:=${VTRDATA}/online-test-lidar/2023-03-28/2023-03-28 + data_dir:=${VTRDATA}/online-test-lidar/$(date '+%F')/$(date '+%F') start_new_graph:=false use_sim_time:=false diff --git a/main/src/vtr_lidar/src/modules/odometry/odometry_icp_module.cpp b/main/src/vtr_lidar/src/modules/odometry/odometry_icp_module.cpp index ac7620c92..b7252be03 100644 --- 
a/main/src/vtr_lidar/src/modules/odometry/odometry_icp_module.cpp +++ b/main/src/vtr_lidar/src/modules/odometry/odometry_icp_module.cpp @@ -509,6 +509,13 @@ void OdometryICPModule::run_(QueryCache &qdata0, OutputCache &, // /// \todo double check validity when no vertex has been created *qdata.T_r_v_odo = T_r_m_icp * sliding_map_odo.T_vertex_this().inverse(); + + // Temporary modification by Jordy to test calibration of the grizzly controller + //CLOG(DEBUG, "grizzly_controller_tests.cbit") << "The Odometry Velocity is: " << *qdata.w_m_r_in_r_odo * -1; + CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Odom Linear Velocity: " << (*qdata.w_m_r_in_r_odo * -1)(0); + CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Odom Angular Velocity: " << (*qdata.w_m_r_in_r_odo * -1)(5); + // End of Jordy's Modifications + /// \todo double check that we can indeed treat m same as v for velocity if (config_->use_trajectory_estimation) *qdata.w_v_r_in_r_odo = *qdata.w_m_r_in_r_odo; diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index 5cba410d7..d53c3cafd 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -145,6 +145,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // retrieve the transorm info from the localization chain const auto chain_info = getChainInfo(robot_state); auto [stamp, w_p_r_in_r, T_p_r, T_w_p, T_w_v_odo, T_r_v_odo, curr_sid] = chain_info; + CLOG(INFO, "path_planning.cbit") << "The T_r_v_odo is: " << T_r_v_odo; CLOG(INFO, "path_planning.cbit") << "The T_p_r is: " << T_p_r; @@ -390,13 +391,13 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { applied_vel(1) = 0.0; } - CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); + //CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << 
applied_vel(1); // If required, saturate the output velocity commands based on the configuration limits - CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; + //CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; Eigen::Matrix saturated_vel = SaturateVel2(applied_vel, config_->max_lin_vel, config_->max_ang_vel); - CLOG(ERROR, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); + //CLOG(ERROR, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); // Store the result in memory so we can use previous state values to re-initialize and extrapolate the robot pose in subsequent iterations vel_history.erase(vel_history.begin()); vel_history.push_back(saturated_vel); @@ -412,6 +413,11 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { command.linear.x = saturated_vel(0); command.angular.z = saturated_vel(1); + // Temporary modification by Jordy to test calibration of the grizzly controller + CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); + CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Angular Velocity: " << saturated_vel(1); + // End of modifications + CLOG(INFO, "mpc.cbit") << "Final control command: [" << command.linear.x << ", " << command.linear.y << ", " << command.linear.z << ", " diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 670b2ba96..30bd21987 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -301,6 +301,7 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { // retrieve the transorm info from the localization chain const auto chain_info = getChainInfo(robot_state); auto [stamp, w_p_r_in_r, T_p_r, T_w_p, T_w_v_odo, T_r_v_odo, curr_sid] = chain_info; + CLOG(INFO, 
"path_planning.cbit") << "The T_r_v_odo is: " << T_r_v_odo; CLOG(INFO, "path_planning.cbit") << "The T_p_r is: " << T_p_r; // Extrapolate the pose of the robot into the future based on the localization delay @@ -458,12 +459,12 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { applied_vel(1) = 0.0; } - CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); + //CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); // If required, saturate the output velocity commands based on the configuration limits - CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; + //CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; Eigen::Matrix saturated_vel = SaturateVel2(applied_vel, config_->max_lin_vel, config_->max_ang_vel); - CLOG(ERROR, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); + //CLOG(ERROR, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); // Store the result in memory so we can use previous state values to re-initialize and extrapolate the robot pose in subsequent iterations vel_history.erase(vel_history.begin()); vel_history.push_back(saturated_vel); @@ -479,6 +480,11 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { command.linear.x = saturated_vel(0); command.angular.z = saturated_vel(1); + // Temporary modification by Jordy to test calibration of the grizzly controller + CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); + CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Angular Velocity: " << saturated_vel(1); + // End of modification + CLOG(INFO, "mpc.cbit") << "Final control command: [" << command.linear.x << ", " << command.linear.y << ", " << command.linear.z << ", " diff 
--git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index bd0b0b9a8..c02dc052e 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -2443,7 +2443,7 @@ void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v Node euclid_pt = curve_to_euclid(start); if (costmap_col_tight(euclid_pt)) { - //CLOG(ERROR, "path_planning.corridor_debug") << "Something has gone wrong, cbit path has a collision, ignoring this p_bin update"; + CLOG(ERROR, "path_planning.corridor_debug") << "Something has gone wrong, cbit path has a collision, ignoring this p_bin update"; continue; } @@ -2456,6 +2456,7 @@ void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v // if there is a collision, set q_left at the location of the current p_bin being processed to the value of q_left/q_right if (collision_check_result1.bool_result == true) { + CLOG(ERROR, "path_planning.corridor_debug") << "collision left"; //CLOG(DEBUG, "path_planning.corridor_debug") << "start node is p: " << start.p << " q: " << start.q; //CLOG(DEBUG, "path_planning.corridor_debug") << "end_left node is p: " << end_left.p << " q: " << end_left.q; double q_left = collision_check_result1.col_node.q; @@ -2483,6 +2484,7 @@ void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v if (collision_check_result2.bool_result == true) { + CLOG(ERROR, "path_planning.corridor_debug") << "collision right"; //CLOG(DEBUG, "path_planning.corridor_debug") << "start node is p: " << start.p << " q: " << start.q; //CLOG(DEBUG, "path_planning.corridor_debug") << "end_right node is p: " << end_right.p << " q: " << end_right.q; double q_right = collision_check_result2.col_node.q; @@ -2536,6 +2538,8 @@ void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v corridor->y_right.push_back(euclid_right.q); } + + //auto corridor_stop_time = 
std::chrono::high_resolution_clock::now(); //auto duration_corridor = std::chrono::duration_cast(corridor_stop_time - corridor_start_time); //CLOG(ERROR, "path_planning.corridor_debug") << "Corridor Update Time: " << duration_corridor.count() << "ms"; diff --git a/robots/src/utias_grizzly_description/urdf/accessories/honeycomb.urdf.xacro b/robots/src/utias_grizzly_description/urdf/accessories/honeycomb.urdf.xacro index 7913fc6fe..0ba4c8c3b 100644 --- a/robots/src/utias_grizzly_description/urdf/accessories/honeycomb.urdf.xacro +++ b/robots/src/utias_grizzly_description/urdf/accessories/honeycomb.urdf.xacro @@ -6,8 +6,8 @@ - --> + diff --git a/rviz/honeycomb.rviz b/rviz/honeycomb.rviz index 872da4ab7..dbfc64d33 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -74,31 +74,31 @@ Visualization Manager: Frames: All Enabled: false base_footprint: - Value: false + Value: true base_footprint_bumper_part: - Value: false + Value: true base_link: - Value: false + Value: true chassis_link: - Value: false + Value: true front_axle_link: - Value: false + Value: true front_bumblebee: - Value: false + Value: true front_left_wheel_link: - Value: false + Value: true front_right_wheel_link: - Value: false + Value: true front_xb3: - Value: false + Value: true hc: - Value: false + Value: true honeycomb: - Value: false + Value: true imu_link: - Value: false + Value: true left_lidar: - Value: false + Value: true lidar: Value: false loc vertex frame: @@ -106,33 +106,33 @@ Visualization Manager: loc vertex frame (offset): Value: false m600_base: - Value: false + Value: true microstrain: - Value: false + Value: true navsat_link: - Value: false + Value: true odo vertex frame: Value: false odom: - Value: false + Value: true planning frame: Value: false rear_antenna_link: - Value: false + Value: true rear_bumblebee: - Value: false + Value: true rear_left_wheel_link: - Value: false + Value: true rear_right_wheel_link: - Value: false + Value: true rear_sensor_base_link: - Value: false 
+ Value: true rear_sensor_plate_link: - Value: false + Value: true rear_xb3: - Value: false + Value: true right_lidar: - Value: false + Value: true robot: Value: true robot planning: @@ -142,9 +142,9 @@ Visualization Manager: robot planning (extrapolated) mpc: Value: false sensor_anchor_link: - Value: false + Value: true velodyne: - Value: false + Value: true world: Value: false world (offset): @@ -1339,6 +1339,74 @@ Visualization Manager: Reliability Policy: Reliable Value: /vtr/corridor_path_right Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz_default_plugins/PointCloud2 + Color: 255; 255; 0 + Color Transformer: FlatColor + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: submap_loc + Position Transformer: XYZ + Selectable: true + Size (Pixels): 3 + Size (m): 0.009999999776482582 + Style: Points + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/submap_loc + Use Fixed Frame: true + Use rainbow: true + Value: false + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz_default_plugins/PointCloud2 + Color: 0; 255; 127 + Color Transformer: FlatColor + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: submap_odom + Position Transformer: XYZ + Selectable: true + Size (Pixels): 3 + Size (m): 0.009999999776482582 + Style: Points + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/submap_odo + Use Fixed Frame: true + Use rainbow: true + 
Value: false Enabled: true Global Options: Background Color: 181; 181; 181 @@ -1385,16 +1453,16 @@ Visualization Manager: Views: Current: Class: rviz_default_plugins/Orbit - Distance: 26.911510467529297 + Distance: 31.960826873779297 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: - X: 0.1747203767299652 - Y: 0.14160969853401184 - Z: -0.6141221523284912 + X: -0.3220612406730652 + Y: 0.23669445514678955 + Z: 1.451749324798584 Focal Shape Fixed Size: false Focal Shape Size: 0.05000000074505806 Invert Z Axis: false @@ -1403,7 +1471,7 @@ Visualization Manager: Pitch: 1.5697963237762451 Target Frame: robot Value: Orbit (rviz_default_plugins) - Yaw: 3.225677490234375 + Yaw: 0.22568470239639282 Saved: - Class: rviz_default_plugins/Orbit Distance: 111.39698028564453 @@ -1607,11 +1675,11 @@ Visualization Manager: Yaw: 2.295583963394165 Window Geometry: Displays: - collapsed: true + collapsed: false Height: 1016 - Hide Left Dock: true + Hide Left Dock: false Hide Right Dock: true - QMainWindow State: 
000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073000000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d0065010000000000000450000000000000000000000738000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 
000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000004b1000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: From 1bb5aaef739dc3968ebdfe120d2b13e1844b1b30 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Fri, 14 Apr 2023 13:04:07 -0400 Subject: [PATCH 17/25] working corridor mpc prototype + minor improvements to planner --- config/honeycomb_grizzly_default.yaml | 44 ++++--- launch/online_honeycomb_grizzly.launch.yaml | 2 +- main/src/vtr_lidar/src/path_planning/cbit.cpp | 6 +- .../src/path_planning/mpc_path_planner2.cpp | 24 ++-- 
.../mpc/scalar_log_barrier_evaluator.hpp | 102 +++++++++++++++++ main/src/vtr_path_planning/src/cbit/cbit.cpp | 4 +- .../src/cbit/cbit_path_planner.cpp | 32 ++++-- .../src/mpc/lateral_error_evaluator.cpp | 5 + .../src/mpc/mpc_path_planner2.cpp | 107 +++++++++++++++--- rviz/honeycomb.rviz | 22 ++-- 10 files changed, 267 insertions(+), 81 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index b9c6d7d17..42a960cf3 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -307,8 +307,8 @@ support_variance: 0.05 support_threshold: 2.5 - influence_distance: 0.50 # was 0.5 Jordy # Note that the total distance where grid cells have values > 0 is min dist + influence dist, not influence dist! - minimum_distance: 0.9 # was 0.3 Jordy + influence_distance: 0.5 # was 0.5 Jordy # Note that the total distance where grid cells have values > 0 is min dist + influence dist, not influence dist! + minimum_distance: 0.8 # was 0.3 Jordy # cost map costmap_history_size: 15 # Keep between 3 and 20, used for temporal filtering @@ -410,7 +410,7 @@ ############ path planning configuration ############ path_planning: type: cbit # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version - control_period: 50 # ms + control_period: 100 # ms teb: # vtr specific visualize: true @@ -435,11 +435,11 @@ cbit: # Environment obs_padding: 0.0 - curv_to_euclid_discretization: 20 + curv_to_euclid_discretization: 10 sliding_window_width: 12.0 sliding_window_freespace_padding: 0.5 - corridor_resolution: 0.05 - state_update_freq: 2.0 + corridor_resolution: 0.2 + state_update_freq: 1.0 update_state: true rand_seed: 1 @@ -462,14 +462,14 @@ costmap: costmap_filter_value: 0.01 - costmap_history: 20 # Note I dont think im actually using this one anymore, am using the one in the change detection v3 now + costmap_history: 30 # Note I dont think im actually using this one 
anymore, am using the one in the change detection v3 now mpc: # Controller Params horizon_steps: 15 - horizon_step_size: 0.15 - forward_vel: 1.00 - max_lin_vel: 1.50 + horizon_step_size: 0.3 + forward_vel: 1.0 + max_lin_vel: 1.5 max_ang_vel: 1.00 vehicle_model: "unicycle" @@ -487,21 +487,19 @@ #lat_error_weight: 0.01 # Cost Function Covariance Matrices - #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 - pose_error_cov: [50.0, 50.0, 10000.0, 10000.0, 10000.0, 10000.0] - vel_error_cov: [500.0, 50.0] # was [0.1 2.0] # No longer using this cost term - #acc_error_cov: [2.0, 2.0] - acc_error_cov: [50.0, 50.0] - kin_error_cov: [50.0, 50.0, 50.0, 50.0, 50.0, 50.0] - #kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] - lat_error_cov: [50.0] + pose_error_cov: [2.0, 2.0, 2.0, 2.0, 2.0, 2.0] + vel_error_cov: [10.0, 10.0] # was [0.1 2.0] # No longer using this cost term + acc_error_cov: [2.0, 2.0] + kin_error_cov: [0.05, 0.05, 0.05, 0.05, 0.05, 0.05] + lat_error_cov: [10.0] # Cost Function Weights - pose_error_weight: 2.0 - vel_error_weight: 1.0 - acc_error_weight: 1.0 - kin_error_weight: 10000.0 - lat_error_weight: 0.02 + pose_error_weight: 5.0 + vel_error_weight: 4.0 + acc_error_weight: 4.0 + kin_error_weight: 200.0 + lat_error_weight: 0.5 + #backup for tracking mpc #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 #vel_error_cov: [0.1, 1.0] # was [0.1 2.0] diff --git a/launch/online_honeycomb_grizzly.launch.yaml b/launch/online_honeycomb_grizzly.launch.yaml index 5075efe5f..090fd4094 100644 --- a/launch/online_honeycomb_grizzly.launch.yaml +++ b/launch/online_honeycomb_grizzly.launch.yaml @@ -22,7 +22,7 @@ windows: - > ros2 launch vtr_navigation vtr.launch.py base_params:=honeycomb_grizzly_default.yaml - data_dir:=${VTRDATA}/online-test-lidar/$(date '+%F')/$(date 
'+%F') + data_dir:=${VTRDATA}/online-test-lidar/2023-04-05/2023-04-05 start_new_graph:=false use_sim_time:=false diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index d53c3cafd..79a72dbb3 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -339,10 +339,10 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp); // Uncomment for using the mpc extrapolated robot pose for control - //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp2); + lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp2); // no extrapolation (comment this out if we are not using extrapolation) - lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); + //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); // TODO: Set whether to use mpc extrapolation as a config param (though there is almost never a good reason not to use it) @@ -391,7 +391,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { applied_vel(1) = 0.0; } - //CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); + CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); // If required, saturate the output velocity commands based on the configuration limits diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp index f7dfd470d..f587697ac 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -233,8 +233,8 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto lat_error_left = 
steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); // For each side of the barrier, compute a scalar inverse barrier term to penalize being close to the bound - const auto lat_barrier_right = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_right); - const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); + const auto lat_barrier_right = steam::vspace::ScalarInverseBarrierEvaluator<1>::MakeShared(lat_error_right); + const auto lat_barrier_left = steam::vspace::ScalarInverseBarrierEvaluator<1>::MakeShared(lat_error_left); // Generate least square cost terms and add them to the optimization problem const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, latLossFunc); @@ -248,13 +248,16 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se } // Solve the optimization problem with GuassNewton solver - using SolverType = steam::GaussNewtonSolver; + //using SolverType = steam::GaussNewtonSolver; + using SolverType = steam::LineSearchGaussNewtonSolver; // Initialize parameters (enable verbose mode) SolverType::Params params; params.verbose = false; // Makes the output display for debug when true params.relative_cost_change_threshold = 1e-6; - params.max_iterations = 250; + params.max_iterations = 200; params.absolute_cost_change_threshold = 1e-6; + params.backtrack_multiplier = 0.5; // Line Search Params + params.max_backtrack_steps = 1000; // Line Search Params SolverType solver(opt_problem, params); solver.optimize(); @@ -456,7 +459,8 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The global reference p is: " << sid_p; // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot - for (int i = (current_sid-1); i < teach_path.size(); i++) + CLOG(WARNING, 
"corridor_mpc_debug.cbit") << "The size of the teach_path is: " << teach_path.size(); + for (int i = (current_sid-1); i <= teach_path.size(); i++) { cbit_path.push_back(teach_path[i]); } @@ -503,6 +507,12 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path double t = (((xp-x1)*(x2-x1)) + ((yp-y1)*(y2-y1))) / total_dist; double p_robot = t; + + // Check if p is negative, (robot starting behind path, if so, make it 0.0: + if (p_robot < 0.0) + { + p_robot = 0.0; + } //double cos_angle = (dist2*dist2 + total_dist*total_dist - dist1*dist1) / (2.0*dist2*total_dist); //double xp_proj = x2 + (x1 - x2) * cos_angle; //double yp_proj = y2 + (y1 - y2) * cos_angle; @@ -565,14 +575,12 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path } } //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; - // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state std::vector p_meas_vec; std::vector measurements; p_meas_vec.reserve(K); for (int i = 0; i < K; i++) { - //p_meas_vec.push_back((i * DT * VF) + p_correction); p_meas_vec.push_back((i * DT * VF) + p_robot); } @@ -658,7 +666,7 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); - + //CLOG(ERROR, "mpc.cbit") << "The Yaw Is: " << yaw_int; // Build the transformation matrix Eigen::Matrix4d T_ref; diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp index 9b430c48f..cc7ba9698 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp @@ 
-102,5 +102,107 @@ typename ScalarLogBarrierEvaluator::Ptr slogbar( } // clang-format on + + + + + + +// Inverse barrier term (EXPERIMENTAL) +template +class ScalarInverseBarrierEvaluator : public Evaluable> { + public: + using Ptr = std::shared_ptr; + using ConstPtr = std::shared_ptr; + + using InType = Eigen::Matrix; + using OutType = Eigen::Matrix; + + static Ptr MakeShared(const typename Evaluable::ConstPtr& v); + ScalarInverseBarrierEvaluator(const typename Evaluable::ConstPtr& v); + + bool active() const override; + using KeySet = typename Evaluable::KeySet; + void getRelatedVarKeys(KeySet& keys) const override; + + OutType value() const override; + typename Node::Ptr forward() const override; + void backward(const Eigen::MatrixXd& lhs, + const typename Node::Ptr& node, + Jacobians& jacs) const override; + + private: + const typename Evaluable::ConstPtr v_; +}; + +// clang-format off +template +typename ScalarInverseBarrierEvaluator::Ptr sinvbar( + const typename Evaluable::InType>::ConstPtr& v); +// clang-format on + +} // namespace vspace +} // namespace steam + +namespace steam { +namespace vspace { + +template +auto ScalarInverseBarrierEvaluator::MakeShared( + const typename Evaluable::ConstPtr& v) -> Ptr { + return std::make_shared(v); +} + +template +ScalarInverseBarrierEvaluator::ScalarInverseBarrierEvaluator( + const typename Evaluable::ConstPtr& v) + : v_(v) {} + +template +bool ScalarInverseBarrierEvaluator::active() const { + return v_->active(); +} + +template +void ScalarInverseBarrierEvaluator::getRelatedVarKeys(KeySet& keys) const { + v_->getRelatedVarKeys(keys); +} + +template +auto ScalarInverseBarrierEvaluator::value() const -> OutType { + auto v_intermed = v_->value(); + v_intermed[0] = (v_->value()[0]); // TODO. 
need to validate this, not 100% sure this is the best/correct way to do this log + return v_intermed; +} + +template +auto ScalarInverseBarrierEvaluator::forward() const -> typename Node::Ptr { + const auto child = v_->forward(); + auto value = child->value(); + value[0] = 1.0 / (child->value())[0]; // TODO. need to validate this, not 100% sure this is the best/correct way to do this inv + //const auto value = log((child->value()[0])); + const auto node = Node::MakeShared(value); + node->addChild(child); + return node; +} + +template +void ScalarInverseBarrierEvaluator::backward(const Eigen::MatrixXd& lhs, + const typename Node::Ptr& node, + Jacobians& jacs) const { + const auto child = std::static_pointer_cast>(node->at(0)); + if (v_->active()) { + v_->backward(-1.0 * (lhs * lhs).inverse(), child, jacs); + } +} + +// clang-format off +template +typename ScalarInverseBarrierEvaluator::Ptr sinvbar( + const typename Evaluable::InType>::ConstPtr& v) { + return ScalarInverseBarrierEvaluator::MakeShared(v); +} +// clang-format on + } // namespace vspace } // namespace steam \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 30bd21987..c2932c2d4 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -405,10 +405,10 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp); // Uncomment for using the mpc extrapolated robot pose for control - //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp2); + lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r_extp2); // no extrapolation (comment this out if we are not using extrapolation) - lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); + //lgmath::se3::Transformation T0 = lgmath::se3::Transformation(T_w_p * T_p_r); // 
TODO: Set whether to use mpc extrapolation as a config param (though there is almost never a good reason not to use it) diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index c02dc052e..bc8a58f6b 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -478,7 +478,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Benchmark current compute time auto stop_time = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast(stop_time - start_time); - //std::cout << "Batch Compute Time (ms): " << duration.count() << std::endl; + std::cout << "Batch Compute Time (ms): " << duration.count() << std::endl; compute_time = static_cast (duration.count()); @@ -561,14 +561,22 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Vertex Prune (maintain only vertices to the right of the collision free vertex) std::vector> pruned_vertex_tree; pruned_vertex_tree.reserve(tree.V.size()); + int test_counter = 0; for (int i =0; ip >= col_free_vertex->p) { pruned_vertex_tree.push_back(tree.V[i]); } + if (tree.V[i]->p >= (p_goal->p + 11.0)) + { + test_counter = test_counter + 1; + } } tree.V = pruned_vertex_tree; + CLOG(ERROR, "path_planning.cbit_planner") << "The number of vertices in the tree outside of sliding window is:" << test_counter; + CLOG(ERROR, "path_planning.cbit_planner") << "The corresponding p threshold was: " << (p_goal->p + 11.0); + // Edge Prune (maintain only edges to the right of the collision free vertex) std::vector, std::shared_ptr>> pruned_edge_tree; @@ -632,7 +640,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // EXPERIMENTAL: Updating the dynamic corridor (now that we know our path is collision free): //auto corridor_start_time = 
std::chrono::high_resolution_clock::now(); - update_corridor(corridor_ptr, path_x, path_y, *p_goal); + //update_corridor(corridor_ptr, path_x, path_y, *p_goal); //auto corridor_stop_time = std::chrono::high_resolution_clock::now(); //auto duration_corridor = std::chrono::duration_cast(corridor_stop_time - corridor_start_time); //CLOG(ERROR, "path_planning.cbit_planner") << "Corridor Update Time: " << duration_corridor.count() << "us"; @@ -1098,7 +1106,7 @@ std::vector> CBITPlanner::SampleFreeSpace(int m) //int pre_seeds = abs(p_goal->p - p_start->p) / 0.25; int pre_seeds = abs(p_zero - p_start->p) / 0.25; // Note needed to change p_goal to p_zero. When the sliding window padding is large, pre-seeds wont get generated all the way to the goal - //std::cout << "generating pre-seeds - number is:" << pre_seeds << std::endl; + std::cout << "generating pre-seeds - number is:" << pre_seeds << std::endl; //std::cout << "The size of the tree is:" << tree.V.size() << std::endl; // In the python version I do this line thing which is more robust, but for now im going to do this quick and dirty @@ -1533,6 +1541,7 @@ std::tuple, std::shared_ptr> CBITPlanner::BestInEdge if (tree.QE2.size() == 0) // need to handle a case where the return path is 100% optimal in which case things get stuck and need ot be flagged to break { CLOG(DEBUG, "path_planning.cbit_planner") << "Edge Queue is Empty, Solution Could Not be Improved This Batch"; + repair_mode = true; return std::tuple, std::shared_ptr> {NULL, NULL}; } @@ -2205,7 +2214,7 @@ bool CBITPlanner::costmap_col(Node node) bool CBITPlanner::discrete_collision(std::vector> obs, double discretization, Node start, Node end) { // We dynamically determine the discretization based on the length of the edge - discretization = round(calc_dist(start, end) * discretization); + discretization = ceil(calc_dist(start, end) * discretization); // Generate discretized test nodes std::vector p_test; @@ -2253,11 +2262,11 @@ bool 
CBITPlanner::discrete_collision(std::vector> obs, doubl // But long term I should move all those over to utils or even a different header and script // Old code I wrote for this function -/* + struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double discretization, Node start, Node end, bool tight) { // We dynamically determine the discretization based on the length of the edge - discretization = round(calc_dist(start, end) * discretization); + discretization = ceil(calc_dist(start, end) * discretization); // Generate discretized test nodes std::vector p_test; @@ -2308,13 +2317,15 @@ struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double d return {false, curv_pt}; } -*/ -// ChatGPT optimized code for this function thats 10-20x faster lol + + +// ChatGPT optimized code for this function +/* struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double discretization, Node start, Node end, bool tight) { // We dynamically determine the discretization based on the length of the edge - discretization = round(calc_dist(start, end) * discretization); + discretization = ceil(calc_dist(start, end) * discretization); // Generate discretized test nodes std::vector p_test; @@ -2365,6 +2376,7 @@ struct CBITPlanner::collision_result CBITPlanner::discrete_collision_v2(double d } return {false, curv_pt}; } +*/ @@ -2456,7 +2468,6 @@ void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v // if there is a collision, set q_left at the location of the current p_bin being processed to the value of q_left/q_right if (collision_check_result1.bool_result == true) { - CLOG(ERROR, "path_planning.corridor_debug") << "collision left"; //CLOG(DEBUG, "path_planning.corridor_debug") << "start node is p: " << start.p << " q: " << start.q; //CLOG(DEBUG, "path_planning.corridor_debug") << "end_left node is p: " << end_left.p << " q: " << end_left.q; double q_left = collision_check_result1.col_node.q; @@ -2484,7 +2495,6 @@ 
void CBITPlanner::update_corridor(std::shared_ptr corridor, std::v if (collision_check_result2.bool_result == true) { - CLOG(ERROR, "path_planning.corridor_debug") << "collision right"; //CLOG(DEBUG, "path_planning.corridor_debug") << "start node is p: " << start.p << " q: " << start.q; //CLOG(DEBUG, "path_planning.corridor_debug") << "end_right node is p: " << end_right.p << " q: " << end_right.q; double q_right = collision_check_result2.col_node.q; diff --git a/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp b/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp index e711eaaca..462c6a31f 100644 --- a/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp +++ b/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp @@ -1,4 +1,5 @@ #include "vtr_path_planning/mpc/custom_steam_evaluators.hpp" +#include namespace steam { namespace stereo { @@ -29,6 +30,10 @@ auto HomoPointErrorEvaluatorRight::value() const -> OutType { auto HomoPointErrorEvaluatorRight::forward() const -> Node::Ptr { const auto child = pt_->forward(); const auto value = D_ * (child->value() - meas_pt_); + if (value <= 0.0) + { + std::cout << "PROBLEM: The value was: " << value << std::endl; + } const auto node = Node::MakeShared(value); node->addChild(child); return node; diff --git a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp index 952418140..00e4e33c9 100644 --- a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp @@ -22,6 +22,7 @@ #include "vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp" //#include "vtr_lidar/cache.hpp" // For lidar version of the planner only + // Updated version using corridor constrained MPC // This file is used to generate a tracking mpc output velocity command given a discretized path to follow and optimization parameters @@ -97,11 +98,11 @@ struct mpc_result SolveMPC2(Eigen::Matrix 
previous_vel, lgmath::se // Set the remaining states - for (int i=0; i previous_vel, lgmath::se } - // Laterial Barrier State Constraints if (i > 0) { @@ -215,21 +215,27 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // DEBUG, for now using a static fixed corridor just to get things working, TODO swap this out with dynamic corridor when stable Eigen::Matrix barrier_right; barrier_right << 0, - -2.5, + barrier_q_right[i], 0, 1; Eigen::Matrix barrier_left; barrier_left << 0.0, - 2.5, + barrier_q_left[i], 0, 1; + //CLOG(WARNING, "mpc.cbit") << "Left Barrier for this meas is: " << barrier_q_left[i]; + //CLOG(WARNING, "mpc.cbit") << "Right Barrier for tis meas is: " << barrier_q_right[i]; + //CLOG(ERROR, "mpc.cbit") << "The Initaial Pose is:" << T0; + //CLOG(WARNING, "mpc.cbit") << "The cbit measurement for this value is: " << measurements_cbit[i].inverse(); + //CLOG(ERROR, "mpc.cbit") << "The vtr measurement for this value is: " << measurements[i].inverse(); + const auto lat_error_right = steam::stereo::HomoPointErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else const auto lat_error_left = steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); // For each side of the barrier, compute a scalar inverse barrier term to penalize being close to the bound - const auto lat_barrier_right = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_right); - const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); + const auto lat_barrier_right = steam::vspace::ScalarInverseBarrierEvaluator<1>::MakeShared(lat_error_right); + const auto lat_barrier_left = steam::vspace::ScalarInverseBarrierEvaluator<1>::MakeShared(lat_error_left); // Generate least square cost terms and add them to the optimization problem const auto lat_cost_term_right = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_right, sharedLatNoiseModel, 
latLossFunc); @@ -237,18 +243,22 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, latLossFunc); opt_problem.addCostTerm(lat_cost_term_left); //CLOG(WARNING, "debug") << "Running the cbit one"; + + } - } // Solve the optimization problem with GuassNewton solver - using SolverType = steam::GaussNewtonSolver; + //using SolverType = steam::GaussNewtonSolver; + using SolverType = steam::LineSearchGaussNewtonSolver; // Initialize parameters (enable verbose mode) SolverType::Params params; params.verbose = false; // Makes the output display for debug when true - params.relative_cost_change_threshold = 0.000001; - //params.max_iterations = 500; - params.absolute_cost_change_threshold = 0.000001; + params.relative_cost_change_threshold = 1e-6; + params.max_iterations = 200; + params.absolute_cost_change_threshold = 1e-6; + params.backtrack_multiplier = 0.5; // Line Search Params + params.max_backtrack_steps = 1000; // Line Search Params SolverType solver(opt_problem, params); solver.optimize(); @@ -389,7 +399,7 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi p_meas_vec.push_back((i * DT * VF) + p_correction); } //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; - + // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values // Note this could be combined in the previous loop too bool point_stabilization = false; @@ -450,11 +460,72 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The global reference p is: " << sid_p; // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot - for (int i = (current_sid-1); i < teach_path.size(); i++) + CLOG(WARNING, "corridor_mpc_debug.cbit") << "The size of the 
teach_path is: " << teach_path.size(); + for (int i = (current_sid-1); i <= teach_path.size(); i++) { cbit_path.push_back(teach_path[i]); } + //DEBUG: + //CLOG(WARNING, "mpc_debug.cbit") << "The robot is at: x: " << std::get<0>(robot_pose) << " y: " << std::get<1>(robot_pose); + //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 0 x: " << teach_path[0].x << " y: " << teach_path[0].y; + //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 1 x: " << teach_path[1].x << " y: " << teach_path[1].y; + //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 2 x: " << teach_path[2].x << " y: " << teach_path[2].y; + //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 3 x: " << teach_path[3].x << " y: " << teach_path[3].y; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[0] is: " << global_path_ptr->p[0]; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[1] is: " << global_path_ptr->p[1]; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[2] is: " << global_path_ptr->p[2]; + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[3] is: " << global_path_ptr->p[3]; + + double x2; + double x1; + double y2; + double y1; + double p_val_ref; + + if (current_sid == 0) + { + x2 = teach_path[1].x; + x1 = teach_path[0].x; + y2 = teach_path[1].y; + y1 =teach_path[0].x; + p_val_ref = global_path_ptr->p[0]; + } + else + { + x2 = teach_path[current_sid+1].x; + x1 = teach_path[current_sid-1].x; + y2 = teach_path[current_sid+1].y; + y1 = teach_path[current_sid-1].y; + p_val_ref = global_path_ptr->p[current_sid-1]; + } + double xp = std::get<0>(robot_pose); + double yp = std::get<1>(robot_pose); + + //double dist1 = sqrt(((xp-x1) * (xp-x1)) + ((yp-y1) * (yp-y1))); + //double dist2 = sqrt(((xp-x2) * (xp-x2)) + ((yp-y2) * (yp-y2))); + double total_dist = sqrt(((x2-x1) * (x2-x1)) + ((y2-y1) * (y2-y1))); + + double t = (((xp-x1)*(x2-x1)) + ((yp-y1)*(y2-y1))) / total_dist; + double p_robot = t; + + // Check if p is negative, (robot starting behind path, if so, make it 0.0: + if (p_robot < 0.0) + { + p_robot = 0.0; + 
} + //double cos_angle = (dist2*dist2 + total_dist*total_dist - dist1*dist1) / (2.0*dist2*total_dist); + //double xp_proj = x2 + (x1 - x2) * cos_angle; + //double yp_proj = y2 + (y1 - y2) * cos_angle; + //double p_robot = sqrt(((xp_proj-x1) * (xp_proj-x1)) + (yp_proj-y1) * (yp_proj-y1)) + p_val_ref; + //CLOG(WARNING, "mpc_debug.cbit") << "Projected Pose is x: " << xp_proj<< " y: " << yp_proj; + //CLOG(WARNING, "mpc_debug.cbit") << "The starting robot pose p is: " << p_robot; + + + + //End of Debug + + // Save a copy of the current path solution to work on //auto cbit_path = *cbit_path_ptr; @@ -505,15 +576,14 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path } } //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; - // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state std::vector p_meas_vec; std::vector measurements; p_meas_vec.reserve(K); for (int i = 0; i < K; i++) { - - p_meas_vec.push_back((i * DT * VF) + p_correction); + //p_meas_vec.push_back((i * DT * VF) + p_correction); + p_meas_vec.push_back((i * DT * VF) + p_robot); } //CLOG(WARNING, "debug") << "p_meas_vec is: " << p_meas_vec; @@ -535,6 +605,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path point_stabilization = true; // Enable point stabilization configs CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; } + lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; @@ -596,7 +667,7 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); - + //CLOG(ERROR, "mpc.cbit") << "The Yaw Is: " << yaw_int; // Build the 
transformation matrix Eigen::Matrix4d T_ref; diff --git a/rviz/honeycomb.rviz b/rviz/honeycomb.rviz index dbfc64d33..22216abaf 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -85,10 +85,6 @@ Visualization Manager: Value: true front_bumblebee: Value: true - front_left_wheel_link: - Value: true - front_right_wheel_link: - Value: true front_xb3: Value: true hc: @@ -121,10 +117,6 @@ Visualization Manager: Value: true rear_bumblebee: Value: true - rear_left_wheel_link: - Value: true - rear_right_wheel_link: - Value: true rear_sensor_base_link: Value: true rear_sensor_plate_link: @@ -1453,7 +1445,7 @@ Visualization Manager: Views: Current: Class: rviz_default_plugins/Orbit - Distance: 31.960826873779297 + Distance: 27.72052001953125 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 @@ -1471,7 +1463,7 @@ Visualization Manager: Pitch: 1.5697963237762451 Target Frame: robot Value: Orbit (rviz_default_plugins) - Yaw: 0.22568470239639282 + Yaw: 3.288867473602295 Saved: - Class: rviz_default_plugins/Orbit Distance: 111.39698028564453 @@ -1675,11 +1667,11 @@ Visualization Manager: Yaw: 2.295583963394165 Window Geometry: Displays: - collapsed: false + collapsed: true Height: 1016 - Hide Left Dock: false + Hide Left Dock: true Hide Right Dock: true - QMainWindow State: 
000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000004b1000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 
000000ff00000000fd000000040000000000000281000003a2fc0200000007fc0000003d000001a20000000000fffffffaffffffff0100000002fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730000000000ffffffff0000008f00fffffffb0000001200530065006c0065006300740069006f006e0000000000000002530000006c00fffffffb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073000000003b000003a2000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c0000026100000001000001c0000003bdfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000003bd000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005000000006efc0100000002fb0000000800540069006d00650000000000000005000000024400fffffffb0000000800540069006d006501000000000000045000000000000000000000039c000003a200000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -1688,6 +1680,6 @@ Window Geometry: collapsed: false Views: collapsed: true - Width: 1848 - X: 72 + Width: 924 + X: 996 Y: 27 From 5a50926a6f9ac081695efbf81d0b1a564de4d32a Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Fri, 14 Apr 2023 13:45:16 -0400 Subject: [PATCH 18/25] Corridor MPC cleanup, needs more field testing but I think stable --- main/src/vtr_lidar/src/path_planning/cbit.cpp | 37 +-- .../src/path_planning/mpc_path_planner2.cpp | 211 
++++++------------ main/src/vtr_path_planning/src/cbit/cbit.cpp | 9 +- .../src/mpc/mpc_path_planner2.cpp | 207 ++++++----------- 4 files changed, 156 insertions(+), 308 deletions(-) diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index 79a72dbb3..99b2a9bc4 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -193,12 +193,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { CLOG(DEBUG, "obstacle_detection.cbit") << "The costmap to world transform is: " << T_start_vertex.inverse(); costmap_ptr->grid_resolution = change_detection_costmap->dl(); - - // Note I think all of the code below here was legacy when I was temporally filtering in cbit, now the most recent obs map should contain the history of the costmaps all embedded - // So in cbit_planner we only need to query the value of costmap_ptr->T_c_w and costmap_ptr->obs_map - - - // Experimental: Storing sequences of costmaps for temporal filtering purposes + // Storing sequences of costmaps for temporal filtering purposes // For the first x iterations, fill the obstacle vector if (costmap_ptr->obs_map_vect.size() < config_->costmap_history) { @@ -208,30 +203,9 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // After that point, we then do a sliding window using shift operations, moving out the oldest map and appending the newest one else { - // Legacy code for when I was filtering inside cbit (made a quick and dirty work around temporarily) - //for (int i = 0; i < (config_->costmap_history-1); i++) - //{ - // costmap_ptr->obs_map_vect[i] = costmap_ptr->obs_map_vect[i + 1]; - // costmap_ptr->T_c_w_vect[i] = costmap_ptr->T_c_w_vect[i + 1]; - //} costmap_ptr->obs_map_vect[config_->costmap_history-1] = obs_map; costmap_ptr->T_c_w_vect[config_->costmap_history-1] = costmap_ptr->T_c_w ; } - - // //TODO: Experimental global costmap filtering idea - // 
Pseudocode: - // 1. Iterate through all the keys in the unordered map, generate a point from the centre of the cell, transform that point to the world frame. - // 2. Repeat this for the 3 most recent costmaps - // 3. In the global map, we purge it of all obstacle cells that are not within a particular radius of the current robot state (do this in a loop at the start) - // 4. each transformed point can be assigned to a cell bin of size "grid_resolution" in the world frame. If all 3 of the recent scans (or whatever level of transient filter we want) - // agree on an obs cell, we create an unordered map key value pair in the global map pointer - // 5. This will generate a global obstacle map with transients filtered, but will also remember obstacles it had seen previously but have since come to close - // 6. Then we just pass the global map to cbit, which only needs to collision check one cost map and all the filtering is contained here - - // I think this would work well, only issue is if you have a tall dynamic object walking very close to the robot (in the permanence range) - // In that case it will add those cells to the map and basically never remove them, but not much we can do about that without better sensor fov/predictions - - } // END OF OBSTACLE PERCEPTION UPDATES @@ -377,7 +351,9 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; + // Solve using corridor mpc auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + // Old path tracking configs //auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, 
accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); //auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); // Tracking controller version applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here @@ -395,9 +371,10 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // If required, saturate the output velocity commands based on the configuration limits - //CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; + CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; Eigen::Matrix saturated_vel = SaturateVel2(applied_vel, config_->max_lin_vel, config_->max_ang_vel); - //CLOG(ERROR, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); + CLOG(INFO, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); + // Store the result in memory so we can use previous state values to re-initialize and extrapolate the robot pose in subsequent iterations vel_history.erase(vel_history.begin()); vel_history.push_back(saturated_vel); @@ -413,7 +390,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { command.linear.x = saturated_vel(0); command.angular.z = saturated_vel(1); - // Temporary modification by Jordy to test calibration of hte grizzly controller + // Temporary modification by Jordy to test calibration of the grizzly controller CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Angular Velocity: " << saturated_vel(1); // End of modifications diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp 
index f587697ac..92440ecfb 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -28,7 +28,7 @@ // It is used in cbit.cpp in both the vtr_lidar package (obstacle avoidance) and vtr_path_planning packages (obstacle free) in the computeCommand function -// Main MPC problem solve function +// Main MPC problem solve function - TODO: dump all these arguments into an mpc config class struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, std::vector barrier_q_left, std::vector barrier_q_right, int K, double DT, double VF, Eigen::Matrix lat_noise_vect, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization, double pose_error_weight, double vel_error_weight, double acc_error_weight, double kin_error_weight, double lat_error_weight) { @@ -55,8 +55,6 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se I_4 << 0, 0, 0; - //Eigen::Matrix I_2_tran; - //I_2_tran << 0, 1, 0, 0; // Setup shared loss functions and noise models for all cost terms @@ -64,12 +62,12 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // The custom L2WeightedLossFunc allows you to dynamically set the weights of cost terms by providing the value as an argument const auto poseLossFunc = steam::L2WeightedLossFunc::MakeShared(pose_error_weight); - const auto velLossFunc = steam::L2WeightedLossFunc::MakeShared(vel_error_weight); // todo, add a param for this + const auto velLossFunc = steam::L2WeightedLossFunc::MakeShared(vel_error_weight); const auto accelLossFunc = steam::L2WeightedLossFunc::MakeShared(acc_error_weight); const auto kinLossFunc = steam::L2WeightedLossFunc::MakeShared(kin_error_weight); const auto latLossFunc = steam::L2WeightedLossFunc::MakeShared(lat_error_weight); - + // Cost term Noise 
Covariance Initialization const auto sharedPoseNoiseModel = steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); const auto sharedVelNoiseModel = steam::StaticNoiseModel<2>::MakeShared(vel_noise_vect); const auto sharedAccelNoiseModel = steam::StaticNoiseModel<2>::MakeShared(accel_noise_vect); @@ -86,22 +84,20 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se lgmath::se3::Transformation T0_inv = T0.inverse(); Eigen::Vector2d v0(0.0, 0.0); - // Pushback the initial states (current robot state) - pose_states.push_back(T0_inv); // Change this to T0 when implementing on robot, T0_1 for debug - //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() + // Push back the initial states (current robot state) + pose_states.push_back(T0_inv); vel_states.push_back(v0); - //CLOG(ERROR, "mpc.cbit") << "Verifying that the initial robot state is about the same as the initial reference measurement"; - //CLOG(ERROR, "mpc.cbit") << "T0_inv: " << T0_inv; - //CLOG(ERROR, "mpc.cbit") << "1st meas: " << measurements_cbit[0]; - + //CLOG(DEBUG, "mpc_debug.cbit") << "Verifying that the initial robot state is about the same as the initial reference measurement"; + //CLOG(DEBUG, "mpc_debug.cbit") << "T0_inv: " << T0_inv; + //CLOG(ERROR, "mpc_debug.cbit") << "1st meas: " << measurements_cbit[0]; - // Set the remaining states + // Set the remaining states using a warm start from the cbit solution for (int i=1; i previous_vel, lgmath::se std::vector pose_state_vars; std::vector::Ptr> vel_state_vars; std::vector measurement_vars; // This one is for storing measurements as locked evaluators for barrier constraints + // Create a locked state var for the 4th column of the identity matrix (used in state constraint) - steam::stereo::HomoPointStateVar::Ptr I_4_eval = steam::stereo::HomoPointStateVar::MakeShared(I_4); // For some reason I_4 needs to be 3x1, it cant handle 4x1's? 
+ steam::stereo::HomoPointStateVar::Ptr I_4_eval = steam::stereo::HomoPointStateVar::MakeShared(I_4); I_4_eval->locked() = true; + // Create Steam variables for (int i = 0; i < K; i++) { pose_state_vars.push_back(steam::se3::SE3StateVar::MakeShared(pose_states[i])); vel_state_vars.push_back(steam::vspace::VSpaceStateVar<2>::MakeShared(vel_states[i])); } - // Lock the first (current robot) state from being able to be modified during the optimization + // Lock the first (current robot) state from being modified during the optimization pose_state_vars[0]->locked() = true; + // Setup the optimization problem steam::OptimizationProblem opt_problem; for (int i=0; i previous_vel, lgmath::se const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, poseLossFunc); opt_problem.addCostTerm(pose_cost_term); - // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) - //const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, sharedLossFunc); - //opt_problem.addCostTerm(vel_cost_term); - - - // Kinematic constraints (softened but penalized heavily) if (i < (K-1)) { @@ -162,12 +154,11 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, kinLossFunc); opt_problem.addCostTerm(kin_cost_term); - // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) + // Non-Zero Velocity Penalty (penalty of non resting control effort helps with point stabilization) const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, velLossFunc); opt_problem.addCostTerm(vel_cost_term); - - // Experimental velocity set-point constraint (instead of non zero velocity penalty) + // Velocity 
set-point constraint - No longer using this due to complications when repeating a path in reverse // Only add this cost term if we are not in point stabilization mode (end of path) //if (point_stabilization == false) //{ @@ -175,9 +166,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // opt_problem.addCostTerm(vel_cost_term); //} - - // Experimental acceleration Constraints - + // Acceleration Constraints if (i == 0) { // On the first iteration, we need to use an error with the previously applied control command state @@ -208,10 +197,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Use the ComposeLandmarkEvaluator to right multiply the 4th column of the identity matrix to create a 4x1 homogenous point vector with lat,lon,alt error components const auto error_vec = steam::stereo::ComposeLandmarkEvaluator::MakeShared(compose_inv, I_4_eval); - // Using a custom HomoPointErrorEvaluator, get lateral error (which is the same as the built-in stereo error evaluator but isolates the lateral error component of the 4x1 homo point vector error) - // We do this twice, once for each side of the corridor state constraint - - // DEBUG, for now using a static fixed corridor just to get things working, TODO swap this out with dynamic corridor when stable + // Build lateral barrier terms by querying the current cbit corridor Eigen::Matrix barrier_right; barrier_right << 0, barrier_q_right[i], @@ -223,15 +209,20 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se 0, 1; - //CLOG(WARNING, "mpc.cbit") << "Left Barrier for this meas is: " << barrier_q_left[i]; - //CLOG(WARNING, "mpc.cbit") << "Right Barrier for tis meas is: " << barrier_q_right[i]; - //CLOG(ERROR, "mpc.cbit") << "The Initaial Pose is:" << T0; - //CLOG(WARNING, "mpc.cbit") << "The cbit measurement for this value is: " << measurements_cbit[i].inverse(); - //CLOG(ERROR, "mpc.cbit") << "The vtr measurement for this value is: " << measurements[i].inverse(); + 
//CLOG(INFO, "mpc_debug.cbit") << "Left Barrier for this meas is: " << barrier_q_left[i]; + //CLOG(INFO, "mpc_debug.cbit") << "Right Barrier for tis meas is: " << barrier_q_right[i]; + //CLOG(INFO, "mpc_debug.cbit") << "The Initaial Pose is:" << T0; + //CLOG(INFO, "mpc_debug.cbit") << "The cbit measurement for this value is: " << measurements_cbit[i].inverse(); + //CLOG(INFO, "mpc_debug.cbit") << "The vtr measurement for this value is: " << measurements[i].inverse(); + // compute the lateral error using a custom Homogenous point error STEAM evaluator const auto lat_error_right = steam::stereo::HomoPointErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else const auto lat_error_left = steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); + // Previously used Log barriers, however due to instability switch to using inverse squared barriers + //const auto lat_barrier_right = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_right); + //const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); + // For each side of the barrier, compute a scalar inverse barrier term to penalize being close to the bound const auto lat_barrier_right = steam::vspace::ScalarInverseBarrierEvaluator<1>::MakeShared(lat_error_right); const auto lat_barrier_left = steam::vspace::ScalarInverseBarrierEvaluator<1>::MakeShared(lat_error_left); @@ -241,24 +232,24 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se opt_problem.addCostTerm(lat_cost_term_right); const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, latLossFunc); opt_problem.addCostTerm(lat_cost_term_left); - //CLOG(WARNING, "debug") << "Running the cbit one"; - - + } } // Solve the optimization problem with GuassNewton solver - //using SolverType = steam::GaussNewtonSolver; + //using SolverType = 
steam::GaussNewtonSolver; // Old solver, does not have back stepping capability using SolverType = steam::LineSearchGaussNewtonSolver; - // Initialize parameters (enable verbose mode) + // Initialize solver parameters SolverType::Params params; params.verbose = false; // Makes the output display for debug when true params.relative_cost_change_threshold = 1e-6; params.max_iterations = 200; params.absolute_cost_change_threshold = 1e-6; - params.backtrack_multiplier = 0.5; // Line Search Params - params.max_backtrack_steps = 1000; // Line Search Params + params.backtrack_multiplier = 0.5; // Line Search Specifc Params, will fail to build if using GaussNewtonSolver + params.max_backtrack_steps = 1000; // Line Search Specifc Params, will fail to build if using GaussNewtonSolver SolverType solver(opt_problem, params); + + // Solve the optimization problem solver.optimize(); // DEBUG: Display the several of the prediction horizon results @@ -328,11 +319,8 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se - - - - // helper function for computing the optimization reference poses T_ref based on the current path solution +// This is specifically for the tracking mpc, but is also used to generate the warm start poses for the corridor mpc struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF) { @@ -340,8 +328,6 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi // Save a copy of the current path solution to work on auto cbit_path = *cbit_path_ptr; - // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) - // PSEUDO CODE: // 1. Find the closest point on the cbit path to the current state of the robot // 2. 
Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) @@ -386,7 +372,7 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi break; } } - //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; + //CLOG(DEBUG, "mpc_debug.cbit") << "cbit_p is: " << cbit_p; // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state std::vector p_meas_vec; @@ -397,19 +383,19 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi p_meas_vec.push_back((i * DT * VF) + p_correction); } - //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + //CLOG(DEBUG, "mpc_debug.cbit") << "p_meas_vec is: " << p_meas_vec; - // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values - // Note this could be combined in the previous loop too + // Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values + // Note this could probably just be combined in the previous loop too bool point_stabilization = false; for (int i = 0; i < p_meas_vec.size(); i++) { // handle end of path case: // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path // This will cause the intepolation to return the final cbit_path pose for all measurements past this point - //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; - //CLOG(INFO, "debug") << "The size of cbit_p is:" << cbit_p.size(); - //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + //CLOG(INFO, "mpc_debug.cbit") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "mpc_debug.cbit") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "mpc_debug.cbit") << "The final cbit_p value is:" << 
cbit_p[cbit_p.size()-1]; if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) { @@ -424,19 +410,11 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi } return {measurements, point_stabilization}; - - - //End of Experimental Changes - } -// For generating VT&R teach path measurements +// For generating VT&R teach path measurements used in the corridor mpc struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid) { - // note this was some rapid prototype code written quite quickly for a meeting, need to refactor this longer term to make it faster for longer paths - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Starting to Pre-process global path"; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The current sid is: " << current_sid; - // Initialize vectors storing the barrier values: std::vector barrier_q_left; std::vector barrier_q_right; @@ -456,7 +434,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path // Store the global p value of the previous sid. 
This is the point I use as my zero reference for the remaining p's double sid_p = global_path_ptr->p[current_sid-1]; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The global reference p is: " << sid_p; + //CLOG(DEBUG, "mpc_debug.cbit") << "The global reference p is: " << sid_p; // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot CLOG(WARNING, "corridor_mpc_debug.cbit") << "The size of the teach_path is: " << teach_path.size(); @@ -471,11 +449,13 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 1 x: " << teach_path[1].x << " y: " << teach_path[1].y; //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 2 x: " << teach_path[2].x << " y: " << teach_path[2].y; //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 3 x: " << teach_path[3].x << " y: " << teach_path[3].y; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[0] is: " << global_path_ptr->p[0]; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[1] is: " << global_path_ptr->p[1]; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[2] is: " << global_path_ptr->p[2]; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[3] is: " << global_path_ptr->p[3]; + //CLOG(WARNING, "mpc_debug.cbit") << "P[0] is: " << global_path_ptr->p[0]; + //CLOG(WARNING, "mpc_debug.cbit") << "P[1] is: " << global_path_ptr->p[1]; + //CLOG(WARNING, "mpc_debug.cbit") << "P[2] is: " << global_path_ptr->p[2]; + //CLOG(WARNING, "mpc_debug.cbit") << "P[3] is: " << global_path_ptr->p[3]; + + // Efficient way of calculating the current p value of the robot w.r.t the teach path double x2; double x1; double y2; @@ -500,35 +480,16 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path } double xp = std::get<0>(robot_pose); double yp = std::get<1>(robot_pose); - - //double dist1 = sqrt(((xp-x1) * (xp-x1)) + ((yp-y1) * (yp-y1))); - //double dist2 = sqrt(((xp-x2) * (xp-x2)) + ((yp-y2) * 
(yp-y2))); double total_dist = sqrt(((x2-x1) * (x2-x1)) + ((y2-y1) * (y2-y1))); double t = (((xp-x1)*(x2-x1)) + ((yp-y1)*(y2-y1))) / total_dist; double p_robot = t; - // Check if p is negative, (robot starting behind path, if so, make it 0.0: + // Check if p is negative, (robot starting behind path, if so, make it 0.0, otherwise this will cause optimization problems) if (p_robot < 0.0) { p_robot = 0.0; } - //double cos_angle = (dist2*dist2 + total_dist*total_dist - dist1*dist1) / (2.0*dist2*total_dist); - //double xp_proj = x2 + (x1 - x2) * cos_angle; - //double yp_proj = y2 + (y1 - y2) * cos_angle; - //double p_robot = sqrt(((xp_proj-x1) * (xp_proj-x1)) + (yp_proj-y1) * (yp_proj-y1)) + p_val_ref; - //CLOG(WARNING, "mpc_debug.cbit") << "Projected Pose is x: " << xp_proj<< " y: " << yp_proj; - //CLOG(WARNING, "mpc_debug.cbit") << "The starting robot pose p is: " << p_robot; - - - - //End of Debug - - - // Save a copy of the current path solution to work on - //auto cbit_path = *cbit_path_ptr; - - // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) // PSEUDO CODE: // 1. 
Find the closest point on the cbit path to the current state of the robot @@ -563,7 +524,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path new_dist = sqrt((dx * dx) + (dy * dy)); if (new_dist < min_dist) { - CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; + //CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; p_correction = lookahead_dist; min_dist = new_dist; } @@ -574,29 +535,28 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path break; } } - //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; + //CLOG(DEBUG, "mpc_debug.cbit") << "cbit_p is: " << cbit_p; // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state std::vector p_meas_vec; std::vector measurements; p_meas_vec.reserve(K); for (int i = 0; i < K; i++) { - //p_meas_vec.push_back((i * DT * VF) + p_correction); p_meas_vec.push_back((i * DT * VF) + p_robot); } - //CLOG(WARNING, "debug") << "p_meas_vec is: " << p_meas_vec; + //CLOG(WARNING, "mpc_debug.cbit") << "p_meas_vec is: " << p_meas_vec; - // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values - // Note this could be combined in the previous loop too + // Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values + // Note this could probably be combined in the previous loop too bool point_stabilization = false; for (int i = 0; i < p_meas_vec.size(); i++) { // handle end of path case: // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path // This will cause the intepolation to return the final cbit_path pose for all measurements past this point - //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; - //CLOG(INFO, "debug") << "The size of cbit_p 
is:" << cbit_p.size(); - //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + //CLOG(INFO, "mpc_debug.cbit") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "mpc_debug.cbit") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "mpc_debug.cbit") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) { @@ -608,13 +568,10 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; - // add to measurement vector measurements.push_back(meas); - - // Find the corresponding left and right barrier q values to pass to the mpc // The corridor_ptr points to the stored barrier values for the entire teach trajectory (0,p_len) @@ -628,11 +585,8 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path } barrier_q_left.push_back(corridor_ptr->q_left[p_ind-1]); barrier_q_right.push_back(corridor_ptr->q_right[p_ind-1]); - //CLOG(WARNING, "debug") << "The left barrier is: " << corridor_ptr->q_left[p_ind-1]; - //CLOG(WARNING, "debug") << "The right barrier is: " << corridor_ptr->q_right[p_ind-1]; - - - + //CLOG(WARNING, "mpc_debug.cbit") << "The left barrier is: " << corridor_ptr->q_left[p_ind-1]; + //CLOG(WARNING, "mpc_debug.cbit") << "The right barrier is: " << corridor_ptr->q_right[p_ind-1]; } return {measurements, point_stabilization, barrier_q_left, barrier_q_right}; @@ -640,7 +594,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path -// function takes in a the cbit path solution with a vector defining hte p axis of the path, and then a desired p_meas +// function takes in the cbit path solution with a vector defining the p axis of the path, and then a desired p_meas // Then tries to output a euclidean pose interpolated for the desired p_meas. 
lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) { @@ -664,9 +618,10 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c // For yaw we need to be abit careful about sign and angle wrap around // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts - + // TODO: There is a problem here for reverse planning, will need to rotate the yaw 180 degrees in that case. + // For normal forward planning this is fine though double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); - //CLOG(ERROR, "mpc.cbit") << "The Yaw Is: " << yaw_int; + //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw Is: " << yaw_int; // Build the transformation matrix Eigen::Matrix4d T_ref; @@ -692,17 +647,6 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel double command_ang_z; Eigen::Matrix saturated_vel; - // Moved nan check to the main mpc solver function - /* - // First check if any of the values are nan, if so we return a zero velocity and flag the error - if (std::isnan(applied_vel(0)) || std::isnan(applied_vel(1))) - { - CLOG(ERROR, "mpc.cbit") << "NAN values detected, mpc optimization failed. 
Returning zero velocities"; - saturated_vel(0) = 0.0; - saturated_vel(1) = 0.0; - return saturated_vel; - } - */ if (abs(applied_vel(0)) >= v_lim) { @@ -715,11 +659,7 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel command_lin_x = -1.0* v_lim; } } - // Removed for bi-directional control purposes - //else if (applied_vel(0) <= 0.0) - //{ - // command_lin_x = 0.0; - //} + else { command_lin_x = applied_vel(0) ; @@ -736,19 +676,12 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel command_ang_z = -1.0 * w_lim; } } - //else if (applied_vel(1) <= -1*w_lim) - //{ - // command_ang_z = -1*w_lim; - //} + else { command_ang_z = applied_vel(1) ; } - // Changes for Bi-directional path traversal, we no longer want to saturate commands less than 0.0 - saturated_vel << command_lin_x, command_ang_z; return saturated_vel; -} - - +} \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index c2932c2d4..330685984 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -265,7 +265,7 @@ void CBIT::initializeRoute(RobotState& robot_state) { CLOG(INFO, "path_planning.cbit") << "The path repeat direction is:" << path_direction; - CLOG(ERROR, "path_planning.cbit") << "Trying to create global path"; + CLOG(INFO, "path_planning.cbit") << "Trying to create global path"; // Create the path class object (Path preprocessing) CBITPath global_path(cbit_config, euclid_path_vec); // Make a pointer to this path @@ -445,7 +445,9 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; + // Solve using corridor mpc auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, vel_error_weight, 
acc_error_weight, kin_error_weight, lat_error_weight); + // Solve using tracking mpc //auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); //auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); // Tracking controller version applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here @@ -462,9 +464,10 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { //CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); // If required, saturate the output velocity commands based on the configuration limits - //CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; + CLOG(INFO, "mpc.cbit") << "Saturating the velocity command if required"; Eigen::Matrix saturated_vel = SaturateVel2(applied_vel, config_->max_lin_vel, config_->max_ang_vel); - //CLOG(ERROR, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); + CLOG(INFO, "mpc.cbit") << "The Saturated linear velocity is: " << saturated_vel(0) << " The angular vel is: " << saturated_vel(1); + // Store the result in memory so we can use previous state values to re-initialize and extrapolate the robot pose in subsequent iterations vel_history.erase(vel_history.begin()); vel_history.push_back(saturated_vel); diff --git a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp index 00e4e33c9..af4e41b08 100644 --- a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp @@ -29,7 +29,7 @@ // It 
is used in cbit.cpp in both the vtr_lidar package (obstacle avoidance) and vtr_path_planning packages (obstacle free) in the computeCommand function -// Main MPC problem solve function +// Main MPC problem solve function - TODO: dump all these arguments into an mpc config class struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, std::vector barrier_q_left, std::vector barrier_q_right, int K, double DT, double VF, Eigen::Matrix lat_noise_vect, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization, double pose_error_weight, double vel_error_weight, double acc_error_weight, double kin_error_weight, double lat_error_weight) { @@ -56,8 +56,6 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se I_4 << 0, 0, 0; - //Eigen::Matrix I_2_tran; - //I_2_tran << 0, 1, 0, 0; // Setup shared loss functions and noise models for all cost terms @@ -65,12 +63,12 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // The custom L2WeightedLossFunc allows you to dynamically set the weights of cost terms by providing the value as an argument const auto poseLossFunc = steam::L2WeightedLossFunc::MakeShared(pose_error_weight); - const auto velLossFunc = steam::L2WeightedLossFunc::MakeShared(vel_error_weight); // todo, add a param for this + const auto velLossFunc = steam::L2WeightedLossFunc::MakeShared(vel_error_weight); const auto accelLossFunc = steam::L2WeightedLossFunc::MakeShared(acc_error_weight); const auto kinLossFunc = steam::L2WeightedLossFunc::MakeShared(kin_error_weight); const auto latLossFunc = steam::L2WeightedLossFunc::MakeShared(lat_error_weight); - + // Cost term Noise Covariance Initialization const auto sharedPoseNoiseModel = steam::StaticNoiseModel<6>::MakeShared(pose_noise_vect); const auto sharedVelNoiseModel = 
steam::StaticNoiseModel<2>::MakeShared(vel_noise_vect); const auto sharedAccelNoiseModel = steam::StaticNoiseModel<2>::MakeShared(accel_noise_vect); @@ -87,22 +85,20 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se lgmath::se3::Transformation T0_inv = T0.inverse(); Eigen::Vector2d v0(0.0, 0.0); - // Pushback the initial states (current robot state) - pose_states.push_back(T0_inv); // Change this to T0 when implementing on robot, T0_1 for debug - //vel_states.push_back(std::vector {0.0, 0.0}); //I think a single line way t odo this is something like Eigen::Matrix::Zero() + // Push back the initial states (current robot state) + pose_states.push_back(T0_inv); vel_states.push_back(v0); - //CLOG(ERROR, "mpc.cbit") << "Verifying that the initial robot state is about the same as the initial reference measurement"; - //CLOG(ERROR, "mpc.cbit") << "T0_inv: " << T0_inv; - //CLOG(ERROR, "mpc.cbit") << "1st meas: " << measurements_cbit[0]; - + //CLOG(DEBUG, "mpc_debug.cbit") << "Verifying that the initial robot state is about the same as the initial reference measurement"; + //CLOG(DEBUG, "mpc_debug.cbit") << "T0_inv: " << T0_inv; + //CLOG(ERROR, "mpc_debug.cbit") << "1st meas: " << measurements_cbit[0]; - // Set the remaining states + // Set the remaining states using a warm start from the cbit solution for (int i=1; i previous_vel, lgmath::se std::vector pose_state_vars; std::vector::Ptr> vel_state_vars; std::vector measurement_vars; // This one is for storing measurements as locked evaluators for barrier constraints + // Create a locked state var for the 4th column of the identity matrix (used in state constraint) - steam::stereo::HomoPointStateVar::Ptr I_4_eval = steam::stereo::HomoPointStateVar::MakeShared(I_4); // For some reason I_4 needs to be 3x1, it cant handle 4x1's? 
+ steam::stereo::HomoPointStateVar::Ptr I_4_eval = steam::stereo::HomoPointStateVar::MakeShared(I_4); I_4_eval->locked() = true; + // Create Steam variables for (int i = 0; i < K; i++) { pose_state_vars.push_back(steam::se3::SE3StateVar::MakeShared(pose_states[i])); vel_state_vars.push_back(steam::vspace::VSpaceStateVar<2>::MakeShared(vel_states[i])); } - // Lock the first (current robot) state from being able to be modified during the optimization + // Lock the first (current robot) state from being modified during the optimization pose_state_vars[0]->locked() = true; + // Setup the optimization problem steam::OptimizationProblem opt_problem; for (int i=0; i previous_vel, lgmath::se const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, poseLossFunc); opt_problem.addCostTerm(pose_cost_term); - // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) - //const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, sharedLossFunc); - //opt_problem.addCostTerm(vel_cost_term); - - - // Kinematic constraints (softened but penalized heavily) if (i < (K-1)) { @@ -163,12 +155,11 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto kin_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(kin_error_func, sharedKinNoiseModel, kinLossFunc); opt_problem.addCostTerm(kin_cost_term); - // Non-Zero Velocity Penalty (OLD, not using this way any more, though might change to this when approaching end of path) + // Non-Zero Velocity Penalty (penalty of non resting control effort helps with point stabilization) const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, velLossFunc); opt_problem.addCostTerm(vel_cost_term); - - // Experimental velocity set-point constraint (instead of non zero velocity penalty) + // Velocity 
set-point constraint - No longer using this due to complications when repeating a path in reverse // Only add this cost term if we are not in point stabilization mode (end of path) //if (point_stabilization == false) //{ @@ -176,9 +167,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // opt_problem.addCostTerm(vel_cost_term); //} - - // Experimental acceleration Constraints - + // Acceleration Constraints if (i == 0) { // On the first iteration, we need to use an error with the previously applied control command state @@ -209,10 +198,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Use the ComposeLandmarkEvaluator to right multiply the 4th column of the identity matrix to create a 4x1 homogenous point vector with lat,lon,alt error components const auto error_vec = steam::stereo::ComposeLandmarkEvaluator::MakeShared(compose_inv, I_4_eval); - // Using a custom HomoPointErrorEvaluator, get lateral error (which is the same as the built-in stereo error evaluator but isolates the lateral error component of the 4x1 homo point vector error) - // We do this twice, once for each side of the corridor state constraint - - // DEBUG, for now using a static fixed corridor just to get things working, TODO swap this out with dynamic corridor when stable + // Build lateral barrier terms by querying the current cbit corridor Eigen::Matrix barrier_right; barrier_right << 0, barrier_q_right[i], @@ -224,15 +210,20 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se 0, 1; - //CLOG(WARNING, "mpc.cbit") << "Left Barrier for this meas is: " << barrier_q_left[i]; - //CLOG(WARNING, "mpc.cbit") << "Right Barrier for tis meas is: " << barrier_q_right[i]; - //CLOG(ERROR, "mpc.cbit") << "The Initaial Pose is:" << T0; - //CLOG(WARNING, "mpc.cbit") << "The cbit measurement for this value is: " << measurements_cbit[i].inverse(); - //CLOG(ERROR, "mpc.cbit") << "The vtr measurement for this value is: " << measurements[i].inverse(); + 
//CLOG(INFO, "mpc_debug.cbit") << "Left Barrier for this meas is: " << barrier_q_left[i]; + //CLOG(INFO, "mpc_debug.cbit") << "Right Barrier for tis meas is: " << barrier_q_right[i]; + //CLOG(INFO, "mpc_debug.cbit") << "The Initaial Pose is:" << T0; + //CLOG(INFO, "mpc_debug.cbit") << "The cbit measurement for this value is: " << measurements_cbit[i].inverse(); + //CLOG(INFO, "mpc_debug.cbit") << "The vtr measurement for this value is: " << measurements[i].inverse(); + // compute the lateral error using a custom Homogenous point error STEAM evaluator const auto lat_error_right = steam::stereo::HomoPointErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else const auto lat_error_left = steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); + // Previously used Log barriers, however due to instability switch to using inverse squared barriers + //const auto lat_barrier_right = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_right); + //const auto lat_barrier_left = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_left); + // For each side of the barrier, compute a scalar inverse barrier term to penalize being close to the bound const auto lat_barrier_right = steam::vspace::ScalarInverseBarrierEvaluator<1>::MakeShared(lat_error_right); const auto lat_barrier_left = steam::vspace::ScalarInverseBarrierEvaluator<1>::MakeShared(lat_error_left); @@ -242,24 +233,24 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se opt_problem.addCostTerm(lat_cost_term_right); const auto lat_cost_term_left = steam::WeightedLeastSqCostTerm<1>::MakeShared(lat_barrier_left, sharedLatNoiseModel, latLossFunc); opt_problem.addCostTerm(lat_cost_term_left); - //CLOG(WARNING, "debug") << "Running the cbit one"; - - + } } // Solve the optimization problem with GuassNewton solver - //using SolverType = steam::GaussNewtonSolver; + //using SolverType = 
steam::GaussNewtonSolver; // Old solver, does not have back stepping capability using SolverType = steam::LineSearchGaussNewtonSolver; - // Initialize parameters (enable verbose mode) + // Initialize solver parameters SolverType::Params params; params.verbose = false; // Makes the output display for debug when true params.relative_cost_change_threshold = 1e-6; params.max_iterations = 200; params.absolute_cost_change_threshold = 1e-6; - params.backtrack_multiplier = 0.5; // Line Search Params - params.max_backtrack_steps = 1000; // Line Search Params + params.backtrack_multiplier = 0.5; // Line Search Specifc Params, will fail to build if using GaussNewtonSolver + params.max_backtrack_steps = 1000; // Line Search Specifc Params, will fail to build if using GaussNewtonSolver SolverType solver(opt_problem, params); + + // Solve the optimization problem solver.optimize(); // DEBUG: Display the several of the prediction horizon results @@ -329,11 +320,8 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se - - - - // helper function for computing the optimization reference poses T_ref based on the current path solution +// This is specifically for the tracking mpc, but is also used to generate the warm start poses for the corridor mpc struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF) { @@ -341,8 +329,6 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi // Save a copy of the current path solution to work on auto cbit_path = *cbit_path_ptr; - // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) - // PSEUDO CODE: // 1. Find the closest point on the cbit path to the current state of the robot // 2. 
Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) @@ -387,7 +373,7 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi break; } } - //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; + //CLOG(DEBUG, "mpc_debug.cbit") << "cbit_p is: " << cbit_p; // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state std::vector p_meas_vec; @@ -398,19 +384,19 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi p_meas_vec.push_back((i * DT * VF) + p_correction); } - //CLOG(DEBUG, "debug") << "p_meas_vec is: " << p_meas_vec; + //CLOG(DEBUG, "mpc_debug.cbit") << "p_meas_vec is: " << p_meas_vec; - // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values - // Note this could be combined in the previous loop too + // Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values + // Note this could probably just be combined in the previous loop too bool point_stabilization = false; for (int i = 0; i < p_meas_vec.size(); i++) { // handle end of path case: // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path // This will cause the intepolation to return the final cbit_path pose for all measurements past this point - //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; - //CLOG(INFO, "debug") << "The size of cbit_p is:" << cbit_p.size(); - //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + //CLOG(INFO, "mpc_debug.cbit") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "mpc_debug.cbit") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "mpc_debug.cbit") << "The final cbit_p value is:" << 
cbit_p[cbit_p.size()-1]; if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) { @@ -425,19 +411,11 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi } return {measurements, point_stabilization}; - - - //End of Experimental Changes - } -// For generating VT&R teach path measurements +// For generating VT&R teach path measurements used in the corridor mpc struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid) { - // note this was some rapid prototype code written quite quickly for a meeting, need to refactor this longer term to make it faster for longer paths - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Starting to Pre-process global path"; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The current sid is: " << current_sid; - // Initialize vectors storing the barrier values: std::vector barrier_q_left; std::vector barrier_q_right; @@ -457,7 +435,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path // Store the global p value of the previous sid. 
This is the point I use as my zero reference for the remaining p's double sid_p = global_path_ptr->p[current_sid-1]; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The global reference p is: " << sid_p; + //CLOG(DEBUG, "mpc_debug.cbit") << "The global reference p is: " << sid_p; // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot CLOG(WARNING, "corridor_mpc_debug.cbit") << "The size of the teach_path is: " << teach_path.size(); @@ -472,11 +450,13 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 1 x: " << teach_path[1].x << " y: " << teach_path[1].y; //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 2 x: " << teach_path[2].x << " y: " << teach_path[2].y; //CLOG(WARNING, "mpc_debug.cbit") << "Teach Path 3 x: " << teach_path[3].x << " y: " << teach_path[3].y; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[0] is: " << global_path_ptr->p[0]; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[1] is: " << global_path_ptr->p[1]; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[2] is: " << global_path_ptr->p[2]; - //CLOG(WARNING, "corridor_mpc_debug.cbit") << "P[3] is: " << global_path_ptr->p[3]; + //CLOG(WARNING, "mpc_debug.cbit") << "P[0] is: " << global_path_ptr->p[0]; + //CLOG(WARNING, "mpc_debug.cbit") << "P[1] is: " << global_path_ptr->p[1]; + //CLOG(WARNING, "mpc_debug.cbit") << "P[2] is: " << global_path_ptr->p[2]; + //CLOG(WARNING, "mpc_debug.cbit") << "P[3] is: " << global_path_ptr->p[3]; + + // Efficient way of calculating the current p value of the robot w.r.t the teach path double x2; double x1; double y2; @@ -501,35 +481,16 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path } double xp = std::get<0>(robot_pose); double yp = std::get<1>(robot_pose); - - //double dist1 = sqrt(((xp-x1) * (xp-x1)) + ((yp-y1) * (yp-y1))); - //double dist2 = sqrt(((xp-x2) * (xp-x2)) + ((yp-y2) * 
(yp-y2))); double total_dist = sqrt(((x2-x1) * (x2-x1)) + ((y2-y1) * (y2-y1))); double t = (((xp-x1)*(x2-x1)) + ((yp-y1)*(y2-y1))) / total_dist; double p_robot = t; - // Check if p is negative, (robot starting behind path, if so, make it 0.0: + // Check if p is negative, (robot starting behind path, if so, make it 0.0, otherwise this will cause optimization problems) if (p_robot < 0.0) { p_robot = 0.0; } - //double cos_angle = (dist2*dist2 + total_dist*total_dist - dist1*dist1) / (2.0*dist2*total_dist); - //double xp_proj = x2 + (x1 - x2) * cos_angle; - //double yp_proj = y2 + (y1 - y2) * cos_angle; - //double p_robot = sqrt(((xp_proj-x1) * (xp_proj-x1)) + (yp_proj-y1) * (yp_proj-y1)) + p_val_ref; - //CLOG(WARNING, "mpc_debug.cbit") << "Projected Pose is x: " << xp_proj<< " y: " << yp_proj; - //CLOG(WARNING, "mpc_debug.cbit") << "The starting robot pose p is: " << p_robot; - - - - //End of Debug - - - // Save a copy of the current path solution to work on - //auto cbit_path = *cbit_path_ptr; - - // Experimental Changes to improve controller instability (completed but not rigourously field tested yet) // PSEUDO CODE: // 1. 
Find the closest point on the cbit path to the current state of the robot @@ -564,7 +525,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path new_dist = sqrt((dx * dx) + (dy * dy)); if (new_dist < min_dist) { - CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; + //CLOG(DEBUG, "mpc_debug.cbit") << "Minimum Distance: " << new_dist; p_correction = lookahead_dist; min_dist = new_dist; } @@ -575,29 +536,28 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path break; } } - //CLOG(DEBUG, "debug") << "cbit_p is: " << cbit_p; + //CLOG(DEBUG, "mpc_debug.cbit") << "cbit_p is: " << cbit_p; // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state std::vector p_meas_vec; std::vector measurements; p_meas_vec.reserve(K); for (int i = 0; i < K; i++) { - //p_meas_vec.push_back((i * DT * VF) + p_correction); p_meas_vec.push_back((i * DT * VF) + p_robot); } - //CLOG(WARNING, "debug") << "p_meas_vec is: " << p_meas_vec; + //CLOG(WARNING, "mpc_debug.cbit") << "p_meas_vec is: " << p_meas_vec; - // todo: Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values - // Note this could be combined in the previous loop too + // Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values + // Note this could probably be combined in the previous loop too bool point_stabilization = false; for (int i = 0; i < p_meas_vec.size(); i++) { // handle end of path case: // if the p meas we would have needed exceeds the final measurement pose, set it equal to our last p value in the path // This will cause the intepolation to return the final cbit_path pose for all measurements past this point - //CLOG(INFO, "debug") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; - //CLOG(INFO, "debug") << "The size of cbit_p 
is:" << cbit_p.size(); - //CLOG(INFO, "debug") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; + //CLOG(INFO, "mpc_debug.cbit") << "The specific p_meas_vec[i] is: " << p_meas_vec[i]; + //CLOG(INFO, "mpc_debug.cbit") << "The size of cbit_p is:" << cbit_p.size(); + //CLOG(INFO, "mpc_debug.cbit") << "The final cbit_p value is:" << cbit_p[cbit_p.size()-1]; if (p_meas_vec[i] > cbit_p[cbit_p.size()-1]) { @@ -609,13 +569,10 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; - // add to measurement vector measurements.push_back(meas); - - // Find the corresponding left and right barrier q values to pass to the mpc // The corridor_ptr points to the stored barrier values for the entire teach trajectory (0,p_len) @@ -629,11 +586,8 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path } barrier_q_left.push_back(corridor_ptr->q_left[p_ind-1]); barrier_q_right.push_back(corridor_ptr->q_right[p_ind-1]); - //CLOG(WARNING, "debug") << "The left barrier is: " << corridor_ptr->q_left[p_ind-1]; - //CLOG(WARNING, "debug") << "The right barrier is: " << corridor_ptr->q_right[p_ind-1]; - - - + //CLOG(WARNING, "mpc_debug.cbit") << "The left barrier is: " << corridor_ptr->q_left[p_ind-1]; + //CLOG(WARNING, "mpc_debug.cbit") << "The right barrier is: " << corridor_ptr->q_right[p_ind-1]; } return {measurements, point_stabilization, barrier_q_left, barrier_q_right}; @@ -641,7 +595,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path -// function takes in a the cbit path solution with a vector defining hte p axis of the path, and then a desired p_meas +// function takes in the cbit path solution with a vector defining the p axis of the path, and then a desired p_meas // Then tries to output a euclidean pose interpolated for the desired p_meas. 
lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) { @@ -665,9 +619,10 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c // For yaw we need to be abit careful about sign and angle wrap around // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts - + // TODO: There is a problem here for reverse planning, will need to rotate the yaw 180 degrees in that case. + // For normal forward planning this is fine though double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); - //CLOG(ERROR, "mpc.cbit") << "The Yaw Is: " << yaw_int; + //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw Is: " << yaw_int; // Build the transformation matrix Eigen::Matrix4d T_ref; @@ -693,17 +648,6 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel double command_ang_z; Eigen::Matrix saturated_vel; - // Moved nan check to the main mpc solver function - /* - // First check if any of the values are nan, if so we return a zero velocity and flag the error - if (std::isnan(applied_vel(0)) || std::isnan(applied_vel(1))) - { - CLOG(ERROR, "mpc.cbit") << "NAN values detected, mpc optimization failed. 
Returning zero velocities"; - saturated_vel(0) = 0.0; - saturated_vel(1) = 0.0; - return saturated_vel; - } - */ if (abs(applied_vel(0)) >= v_lim) { @@ -716,11 +660,7 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel command_lin_x = -1.0* v_lim; } } - // Removed for bi-directional control purposes - //else if (applied_vel(0) <= 0.0) - //{ - // command_lin_x = 0.0; - //} + else { command_lin_x = applied_vel(0) ; @@ -737,17 +677,12 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel command_ang_z = -1.0 * w_lim; } } - //else if (applied_vel(1) <= -1*w_lim) - //{ - // command_ang_z = -1*w_lim; - //} + else { command_ang_z = applied_vel(1) ; } - // Changes for Bi-directional path traversal, we no longer want to saturate commands less than 0.0 - saturated_vel << command_lin_x, command_ang_z; return saturated_vel; } From ab03da36ac0f785b66d19abc2253da7dfba15a5b Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Mon, 24 Apr 2023 12:35:13 -0400 Subject: [PATCH 19/25] Backup of corridor mpc before overhaul for new reference pose and barrier generation scheme --- config/honeycomb_grizzly_default.yaml | 18 +++---- main/src/vtr_lidar/src/path_planning/cbit.cpp | 4 +- .../src/path_planning/mpc_path_planner2.cpp | 2 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 4 +- .../src/cbit/cbit_path_planner.cpp | 51 +++++++++++++++++-- rviz/honeycomb.rviz | 18 +++++-- 6 files changed, 74 insertions(+), 23 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 42a960cf3..22f46e1cd 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -12,7 +12,7 @@ #"navigation.command", # tactic #"tactic", - # "tactic.pipeline", + "tactic.pipeline", # "tactic.module", # "tactic.module.live_mem_manager", # "tactic.module.graph_mem_manager", @@ -409,7 +409,7 @@ ############ path planning configuration ############ path_planning: - type: cbit # teb.lidar for old teb version, cbit for obstacle free 
path tracker, cbit.lidar for obstacle avoidance version + type: cbit.lidar # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version control_period: 100 # ms teb: # vtr specific @@ -447,7 +447,7 @@ initial_samples: 300 batch_samples: 100 pre_seed_resolution: 0.5 - alpha: 1.0 + alpha: 0.5 q_max: 2.5 frame_interval: 50 iter_max: 10000000 @@ -466,11 +466,11 @@ mpc: # Controller Params - horizon_steps: 15 - horizon_step_size: 0.3 - forward_vel: 1.0 + horizon_steps: 20 + horizon_step_size: 0.2 + forward_vel: 0.85 max_lin_vel: 1.5 - max_ang_vel: 1.00 + max_ang_vel: 1.25 vehicle_model: "unicycle" # Cost Function Covariance Matrices @@ -495,8 +495,8 @@ # Cost Function Weights pose_error_weight: 5.0 - vel_error_weight: 4.0 - acc_error_weight: 4.0 + vel_error_weight: 2.0 + acc_error_weight: 1.0 kin_error_weight: 200.0 lat_error_weight: 0.5 diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index 99b2a9bc4..a43716ad7 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -387,8 +387,8 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // return the computed velocity command for the first time step Command command; - command.linear.x = saturated_vel(0); - command.angular.z = saturated_vel(1); + command.linear.x = saturated_vel(0) * 1.1; // * 1.1 added to compensate for bad grizzly internal controller set points + command.angular.z = saturated_vel(1) * 1.1; // Temporary modification by Jordy to test calibration of the grizzly controller CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp index 92440ecfb..dcf779f92 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp +++ 
b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -162,7 +162,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Only add this cost term if we are not in point stabilization mode (end of path) //if (point_stabilization == false) //{ - // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, sharedLossFunc); + // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, velLossFunc); // opt_problem.addCostTerm(vel_cost_term); //} diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 330685984..514b31b90 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -480,8 +480,8 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { // return the computed velocity command for the first time step Command command; - command.linear.x = saturated_vel(0); - command.angular.z = saturated_vel(1); + command.linear.x = saturated_vel(0) * 1.1; // * 1.1 Added to compensate for bad grizzly internal controller config + command.angular.z = saturated_vel(1) * 1.1;; // Temporary modification by Jordy to test calibration of hte grizzly controller CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index bc8a58f6b..8748137c3 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -256,6 +256,10 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // benchmarking example code auto start_time = 
std::chrono::high_resolution_clock::now(); + // Variables for rolling average compute time for debug + int batches_completed = 0; + auto average_batch_time = 0.0; + bool localization_flag = true; // Set the fact we are localized if we make it to this point @@ -350,6 +354,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Perform a state update to convert the actual robot position to its corresponding pq space: p_goal = UpdateState(path_direction); + //TODO: It could be useful to convert this p_goal back to euclid and compare with se3_robot_pose to verify the conversion worked properly (error should be very low) // If its not, we probably need to handle it or return an error @@ -478,7 +483,11 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // Benchmark current compute time auto stop_time = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast(stop_time - start_time); - std::cout << "Batch Compute Time (ms): " << duration.count() << std::endl; + //std::cout << "Batch Compute Time (ms): " << duration.count() << std::endl; + batches_completed = batches_completed + 1; + average_batch_time = ((batches_completed * average_batch_time) + duration.count()) / (batches_completed + 1); + //std::cout << "Average Batch Compute Time (ms): " << average_batch_time << std::endl; + compute_time = static_cast (duration.count()); @@ -640,7 +649,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // EXPERIMENTAL: Updating the dynamic corridor (now that we know our path is collision free): //auto corridor_start_time = std::chrono::high_resolution_clock::now(); - //update_corridor(corridor_ptr, path_x, path_y, *p_goal); + update_corridor(corridor_ptr, path_x, path_y, *p_goal); //auto corridor_stop_time = std::chrono::high_resolution_clock::now(); //auto duration_corridor = std::chrono::duration_cast(corridor_stop_time - corridor_start_time); 
//CLOG(ERROR, "path_planning.cbit_planner") << "Corridor Update Time: " << duration_corridor.count() << "us"; @@ -739,7 +748,11 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo } - + auto start_bench_time = std::chrono::high_resolution_clock::now(); + + + + // Planning loop starts here // I think there is some gains to be had here, we actually iterate through the vertex queue twice finding minima, // I think we probably could combine the 2 vertex functions into 1 cutting compute in half (we can test to see if this is a big gain or not) @@ -749,6 +762,14 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo //auto test = BestInVertexQueue(); ExpandVertex(BestInVertexQueue()); } + + auto stop_bench_time = std::chrono::high_resolution_clock::now(); + auto duration_bench = std::chrono::duration_cast(stop_bench_time - start_bench_time); + CLOG(DEBUG, "path_planning_compute.cbit") << "Compute 1: " << duration_bench.count(); + start_bench_time = std::chrono::high_resolution_clock::now(); + + + // Generate prospective nodes std::tuple, std::shared_ptr> prospective_edge = BestInEdgeQueue(); @@ -765,6 +786,13 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo } // Remove the edge from the queue (I think best to do this inside BestInEdgeQueue function) + + + stop_bench_time = std::chrono::high_resolution_clock::now(); + duration_bench = std::chrono::duration_cast(stop_bench_time - start_bench_time); + CLOG(INFO, "path_planning_compute.cbit") << "Compute 2: " << duration_bench.count(); + start_bench_time = std::chrono::high_resolution_clock::now(); + // TODO: Collision check and update tree/queues if (vm->g_T_weighted + calc_weighted_dist(*vm, *xm, conf.alpha) + h_estimated_admissible(*xm, *p_goal) < p_goal->g_T_weighted) { @@ -776,6 +804,11 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo double actual_cost = cost_col(obs_rectangle, *vm, *xm); 
double weighted_cost = weighted_cost_col(obs_rectangle, *vm, *xm); + stop_bench_time = std::chrono::high_resolution_clock::now(); + duration_bench = std::chrono::duration_cast(stop_bench_time - start_bench_time); + CLOG(INFO, "path_planning_compute.cbit") << "Compute 3: " << duration_bench.count(); + start_bench_time = std::chrono::high_resolution_clock::now(); + if (g_estimated_admissible(*vm, *p_start) + weighted_cost + h_estimated_admissible(*xm, *p_goal) < p_goal->g_T_weighted) { if (vm->g_T_weighted + weighted_cost < xm->g_T_weighted) @@ -837,7 +870,10 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo //tree.QV.push_back(xm); tree.QV2.insert(std::pair>(xm->g_T_weighted + h_estimated_admissible(*xm, *p_goal), xm)); } - + stop_bench_time = std::chrono::high_resolution_clock::now(); + duration_bench = std::chrono::duration_cast(stop_bench_time - start_bench_time); + CLOG(INFO, "path_planning_compute.cbit") << "Compute 3.5: " << duration_bench.count(); + start_bench_time = std::chrono::high_resolution_clock::now(); // Generate edge, create parent chain tree.E.push_back(std::tuple, std::shared_ptr> {vm, xm}); @@ -887,8 +923,15 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo itr++; } } + stop_bench_time = std::chrono::high_resolution_clock::now(); + duration_bench = std::chrono::duration_cast(stop_bench_time - start_bench_time); + CLOG(DEBUG, "path_planning_compute.cbit") << "Compute 3.75: " << duration_bench.count(); + start_bench_time = std::chrono::high_resolution_clock::now(); } } + stop_bench_time = std::chrono::high_resolution_clock::now(); + duration_bench = std::chrono::duration_cast(stop_bench_time - start_bench_time); + CLOG(DEBUG, "path_planning_compute.cbit") << "Compute 4: " << duration_bench.count(); } // If there is no edges in the queue which can improve the current tree, clear the queues which will consequently trigger the end of a batch else diff --git a/rviz/honeycomb.rviz 
b/rviz/honeycomb.rviz index 22216abaf..fda7a56c3 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -21,7 +21,7 @@ Panels: - /Path2 - /Corridor Path Left1/Topic1 Splitter Ratio: 0.4507042169570923 - Tree Height: 871 + Tree Height: 443 - Class: rviz_common/Selection Name: Selection - Class: rviz_common/Tool Properties @@ -85,6 +85,10 @@ Visualization Manager: Value: true front_bumblebee: Value: true + front_left_wheel_link: + Value: true + front_right_wheel_link: + Value: true front_xb3: Value: true hc: @@ -117,6 +121,10 @@ Visualization Manager: Value: true rear_bumblebee: Value: true + rear_left_wheel_link: + Value: true + rear_right_wheel_link: + Value: true rear_sensor_base_link: Value: true rear_sensor_plate_link: @@ -1445,16 +1453,16 @@ Visualization Manager: Views: Current: Class: rviz_default_plugins/Orbit - Distance: 27.72052001953125 + Distance: 14.629002571105957 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: - X: -0.3220612406730652 - Y: 0.23669445514678955 - Z: 1.451749324798584 + X: 1.5698918104171753 + Y: -0.01999158412218094 + Z: 1.453581690788269 Focal Shape Fixed Size: false Focal Shape Size: 0.05000000074505806 Invert Z Axis: false From 0fde636f7d2e37aeb8aca69e69a6fa66a32c866a Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Tue, 9 May 2023 11:33:03 -0400 Subject: [PATCH 20/25] Branch Cleanup --- config/honeycomb_grizzly_default.yaml | 44 ++-- launch/online_honeycomb_grizzly.launch.yaml | 2 +- .../include/vtr_lidar/path_planning/cbit.hpp | 2 +- .../path_planning/mpc_path_planner2.hpp | 15 +- .../localization/localization_icp_module.cpp | 1 + main/src/vtr_lidar/src/path_planning/cbit.cpp | 162 ++++++++++++-- .../src/path_planning/mpc_path_planner2.cpp | 208 ++++++++++++++--- .../include/vtr_path_planning/cbit/cbit.hpp | 16 +- .../vtr_path_planning/cbit/generate_pq.hpp | 2 +- .../include/vtr_path_planning/cbit/utils.hpp | 2 + 
.../mpc/mpc_path_planner2.hpp | 17 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 210 +++++++++++++++++- .../src/cbit/cbit_path_planner.cpp | 30 ++- .../src/cbit/generate_pq.cpp | 12 + main/src/vtr_path_planning/src/cbit/utils.cpp | 56 +++++ .../src/mpc/mpc_path_planner2.cpp | 96 ++++++-- .../path/localization_chain.hpp | 1 - rviz/honeycomb.rviz | 110 ++++++--- 18 files changed, 861 insertions(+), 125 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 22f46e1cd..303344d94 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -12,7 +12,7 @@ #"navigation.command", # tactic #"tactic", - "tactic.pipeline", + #"tactic.pipeline", # "tactic.module", # "tactic.module.live_mem_manager", # "tactic.module.graph_mem_manager", @@ -409,7 +409,7 @@ ############ path planning configuration ############ path_planning: - type: cbit.lidar # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version + type: cbit # teb.lidar for old teb version, cbit for obstacle free path tracker, cbit.lidar for obstacle avoidance version control_period: 100 # ms teb: # vtr specific @@ -467,10 +467,10 @@ mpc: # Controller Params horizon_steps: 20 - horizon_step_size: 0.2 - forward_vel: 0.85 + horizon_step_size: 0.3 + forward_vel: 1.1 max_lin_vel: 1.5 - max_ang_vel: 1.25 + max_ang_vel: 1.5 vehicle_model: "unicycle" # Cost Function Covariance Matrices @@ -486,19 +486,35 @@ #kin_error_weight: 1.0 #lat_error_weight: 0.01 + # Cost Function Covariance Matrices - pose_error_cov: [2.0, 2.0, 2.0, 2.0, 2.0, 2.0] - vel_error_cov: [10.0, 10.0] # was [0.1 2.0] # No longer using this cost term - acc_error_cov: [2.0, 2.0] - kin_error_cov: [0.05, 0.05, 0.05, 0.05, 0.05, 0.05] - lat_error_cov: [10.0] + pose_error_cov: [30.0, 30.0, 300.0, 300.0, 300.0, 6.50] + vel_error_cov: [10.0, 100.0] # was [0.1 2.0] # No longer using this cost term + acc_error_cov: [10.0, 
10.0] + kin_error_cov: [0.004, 0.004, 0.004, 0.004, 0.004, 0.004] + lat_error_cov: [20.0] # Cost Function Weights pose_error_weight: 5.0 - vel_error_weight: 2.0 - acc_error_weight: 1.0 - kin_error_weight: 200.0 - lat_error_weight: 0.5 + vel_error_weight: 5.0 + acc_error_weight: 5.0 + kin_error_weight: 1000.0 + lat_error_weight: 0.01 + + + # Cost Function Covariance Matrices + #pose_error_cov: [2.0, 2.0, 2.0, 2.0, 2.0, 2.0] + #vel_error_cov: [10.0, 10.0] # was [0.1 2.0] # No longer using this cost term + #acc_error_cov: [2.0, 2.0] + #kin_error_cov: [0.05, 0.05, 0.05, 0.05, 0.05, 0.05] + #lat_error_cov: [10.0] + + # Cost Function Weights + #pose_error_weight: 5.0 + #vel_error_weight: 2.0 + #acc_error_weight: 1.0 + #kin_error_weight: 200.0 + #lat_error_weight: 0.5 #backup for tracking mpc #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 diff --git a/launch/online_honeycomb_grizzly.launch.yaml b/launch/online_honeycomb_grizzly.launch.yaml index 090fd4094..04fea0672 100644 --- a/launch/online_honeycomb_grizzly.launch.yaml +++ b/launch/online_honeycomb_grizzly.launch.yaml @@ -22,7 +22,7 @@ windows: - > ros2 launch vtr_navigation vtr.launch.py base_params:=honeycomb_grizzly_default.yaml - data_dir:=${VTRDATA}/online-test-lidar/2023-04-05/2023-04-05 + data_dir:=${VTRDATA}/online-test-lidar/2023-04-27/2023-04-27 start_new_graph:=false use_sim_time:=false diff --git a/main/src/vtr_lidar/include/vtr_lidar/path_planning/cbit.hpp b/main/src/vtr_lidar/include/vtr_lidar/path_planning/cbit.hpp index abdc126e8..13034ae45 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/path_planning/cbit.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/path_planning/cbit.hpp @@ -19,7 +19,7 @@ #pragma once #include "vtr_path_planning/cbit/cbit.hpp" - +#include // For benchmarking namespace vtr { namespace lidar { diff --git a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp 
b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp index ebf193ed2..1909ece97 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/path_planning/mpc_path_planner2.hpp @@ -32,6 +32,9 @@ struct meas_result { std::vector measurements; bool point_stabilization; + std::vector p_interp_vec; + std::vector q_interp_vec; + }; struct meas_result2 @@ -42,6 +45,13 @@ struct meas_result2 std::vector barrier_q_right; }; +struct interp_result +{ + lgmath::se3::Transformation measurement; + double p_interp; + double q_interp; +}; + // Declaring helper functions @@ -54,8 +64,11 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi // Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity struct meas_result2 GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid); +// Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity +struct meas_result2 GenerateReferenceMeas4(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid, std::vector p_interp_vec); + // Helper function for post-processing and saturating the velocity command Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim); // Helper function in Generate Reference Meas which interpolates a Transformation measurement gen the cbit_path and the desired measurements p value along the path -lgmath::se3::Transformation InterpolateMeas2(double p_meas, std::vector cbit_p, std::vector cbit_path); \ No newline at end of file +struct interp_result InterpolateMeas2(double p_meas, std::vector cbit_p, std::vector cbit_path); \ No newline at end of file diff --git 
a/main/src/vtr_lidar/src/modules/localization/localization_icp_module.cpp b/main/src/vtr_lidar/src/modules/localization/localization_icp_module.cpp index 212913c24..4c7fc1f8f 100644 --- a/main/src/vtr_lidar/src/modules/localization/localization_icp_module.cpp +++ b/main/src/vtr_lidar/src/modules/localization/localization_icp_module.cpp @@ -329,6 +329,7 @@ void LocalizationICPModule::run_(QueryCache &qdata0, OutputCache &, *qdata.T_r_v_loc = T_r_v_icp; // set success *qdata.loc_success = true; + CLOG(WARNING, "lidar.localization_icp") << "The resulting transform is:" << T_r_v_icp; } else { CLOG(WARNING, "lidar.localization_icp") << "Matched points ratio " << matched_points_ratio diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index a43716ad7..47cecb293 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -18,6 +18,7 @@ */ #include "vtr_lidar/path_planning/cbit.hpp" #include "vtr_lidar/path_planning/mpc_path_planner2.hpp" +#include "vtr_path_planning/cbit/utils.hpp" #include "vtr_lidar/cache.hpp" @@ -131,6 +132,8 @@ LidarCBIT::~LidarCBIT() { stop(); } // Given the current plan and obstacles, generate a twist command for the robot using tracking mpc auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { + auto command_start_time = std::chrono::high_resolution_clock::now(); + auto& robot_state = dynamic_cast(robot_state0); auto& chain = *robot_state.chain; if (!chain.isLocalized()) { @@ -145,7 +148,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // retrieve the transorm info from the localization chain const auto chain_info = getChainInfo(robot_state); auto [stamp, w_p_r_in_r, T_p_r, T_w_p, T_w_v_odo, T_r_v_odo, curr_sid] = chain_info; - + CLOG(WARNING, "mpc.cbit") << "The resulting transform (from chaininfo) is:" << T_p_r.inverse(); CLOG(INFO, "path_planning.cbit") << "The T_r_v_odo is: " << T_r_v_odo; 
CLOG(INFO, "path_planning.cbit") << "The T_p_r is: " << T_p_r; @@ -301,10 +304,10 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { w_p_r_in_r(3) = 0.0; w_p_r_in_r(4) = 0.0; w_p_r_in_r(5) = -1* vel_history.back()[1]; - CLOG(DEBUG, "mpc_debug.cbit") << "Robot velocity Used for Extrapolation: " << -w_p_r_in_r.transpose() << std::endl; + CLOG(ERROR, "mpc_debug.cbit") << "Robot velocity Used for Extrapolation: " << -w_p_r_in_r.transpose() << std::endl; Eigen::Matrix xi_p_r_in_r((dt - (std::floor(dt / control_period) * control_period)) * w_p_r_in_r); T_p_r2 = T_p_r2 * tactic::EdgeTransform(xi_p_r_in_r).inverse(); - CLOG(DEBUG, "mpc_debug.cbit") << "The final time period is: " << (dt - (std::floor(dt / control_period) * control_period)); + CLOG(ERROR, "mpc_debug.cbit") << "The final time period is: " << (dt - (std::floor(dt / control_period) * control_period)); const auto T_p_r_extp2 = T_p_r2; CLOG(DEBUG, "mpc_debug.cbit") << "New extrapolated pose:" << T_p_r_extp2; @@ -337,24 +340,155 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { auto measurements = meas_result.measurements; bool point_stabilization = meas_result.point_stabilization; + std::vector p_interp_vec = meas_result.p_interp_vec; + std::vector q_interp_vec = meas_result.q_interp_vec; + //CLOG(WARNING, "mpc_debug.cbit") << "The Tracking Measurements are: " << measurements; + + //CLOG(WARNING, "mpc.cbit") << "The p_interp_vec is (in cbit): " << p_interp_vec; + //CLOG(WARNING, "mpc.cbit") << "The q_interp_vec is (in cbit): " << q_interp_vec; + + // Experimental, corridor MPC reference measurement generation: - auto meas_result3 = GenerateReferenceMeas3(global_path_ptr, corridor_ptr, robot_pose, K, DT, VF, curr_sid); - auto measurements3 = meas_result3.measurements; - bool point_stabilization3 = meas_result3.point_stabilization; - std::vector barrier_q_left = meas_result3.barrier_q_left; - std::vector barrier_q_right = meas_result3.barrier_q_right; + //auto 
meas_result3 = GenerateReferenceMeas3(global_path_ptr, corridor_ptr, robot_pose, K, DT, VF, curr_sid); + //auto measurements3 = meas_result3.measurements; + //bool point_stabilization3 = meas_result3.point_stabilization; + //std::vector barrier_q_left = meas_result3.barrier_q_left; + //std::vector barrier_q_right = meas_result3.barrier_q_right; // END of experimental code + // Experimental Synchronized Tracking/Teach Reference Poses: + auto meas_result4 = GenerateReferenceMeas4(global_path_ptr, corridor_ptr, robot_pose, K, DT, VF, curr_sid, p_interp_vec); + auto measurements4 = meas_result4.measurements; + bool point_stabilization4 = meas_result4.point_stabilization; + std::vector barrier_q_left = meas_result4.barrier_q_left; + std::vector barrier_q_right = meas_result4.barrier_q_right; + //CLOG(ERROR, "mpc_debug.cbit") << "The New Reference Measurements are: " << measurements4; + + + + + + // Checking whether we need to correct the yaw on the cbit reference poses based on the teach path reference poses + /* + Eigen::Matrix T_ROT_YAW; + T_ROT_YAW << -1, 0, 0, 0, + 0, -1, 0, 0, + 0, 0, 1, 0, + 0, 0, 0, 1; + lgmath::se3::Transformation T_ROT_YAW2(T_ROT_YAW); + + CLOG(ERROR, "mpc_debug.cbit") << "THE PI ROTATION MATRIX IS: " << T_ROT_YAW; + auto cbit_pose = measurements[0].matrix().block(0, 0, 3, 3); + auto teach_pose = measurements4[0].matrix().block(0, 0, 3, 3); + auto rel_pose = teach_pose.transpose() * cbit_pose; + CLOG(ERROR, "mpc_debug.cbit") << "THE CBIT ROTATION MATRIX IS: " << cbit_pose; + CLOG(ERROR, "mpc_debug.cbit") << "THE TEACH ROTATION MATRIX IS: " << teach_pose; + CLOG(ERROR, "mpc_debug.cbit") << "THE RELATIVE ROTATION MATRIX IS: " << teach_pose.transpose() * cbit_pose; + double relative_yaw = atan2(rel_pose(1,0),rel_pose(0,0)); + CLOG(ERROR, "mpc_debug.cbit") << "THE RELATIVE YAW IS: " << relative_yaw; + if (fabs(relative_yaw) >= 1.57075) + { + measurements[0] = measurements[0].matrix() * T_ROT_YAW2; + CLOG(ERROR, "mpc_debug.cbit") << "CORRECTED THE 
YAW"; + } + CLOG(ERROR, "mpc_debug.cbit") << "THE ROTATED CBIT MATRIX IS: " << measurements[0]; + */ + + + + + // Generate the barrier terms: + // To do this, we need to conver the p_interp_vec,q_interp_vec into euclidean, as well as pairs of (p_interp,q_max), (q_interp,-q_max) + // Then collision check between them + //Node test_node = curve_to_euclid(Node(0.0, 0.0)); + Node test_node = curve_to_euclid(Node(10.0, 0.6)); + //CLOG(ERROR, "mpc_debug.cbit") << "The Euclidean Node is x: " << test_node.p << ", y: "<< test_node.q << ", z: " << test_node.z; + std::vector barrier_q_left_test; + std::vector barrier_q_right_test; + for (int i = 0; i ref_pose_vec1; + for (int i = 0; i ref_pose_vec2; + for (int i = 0; i mpc_poses; try { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; + // Using new sychronized measurements: + auto mpc_result = SolveMPC2(applied_vel, T0, measurements4, measurements, barrier_q_left_test, barrier_q_right_test, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + // Solve using corridor mpc - auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + //auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); // Old path tracking configs - //auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, 
pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + //auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); //auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); // Tracking controller version applied_vel = mpc_result.applied_vel; // note dont re-declare applied vel here mpc_poses = mpc_result.mpc_poses; @@ -383,12 +517,12 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { robot_poses.push_back(T_w_p * T_p_r); // Send the robot poses and mpc prediction to rviz - visualize(stamp, T_w_p, T_p_r, T_p_r_extp, T_p_r_extp2, mpc_poses, robot_poses); + visualize(stamp, T_w_p, T_p_r, T_p_r_extp, T_p_r_extp2, mpc_poses, robot_poses, ref_pose_vec1, ref_pose_vec2); // return the computed velocity command for the first time step Command command; command.linear.x = saturated_vel(0) * 1.1; // * 1.1 added to compensate for bad grizzly internal controller set points - command.angular.z = saturated_vel(1) * 1.1; + command.angular.z = saturated_vel(1); // Temporary modification by Jordy to test calibration of the grizzly controller CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); @@ -400,7 +534,9 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { << command.linear.y << ", " << command.linear.z << ", " << command.angular.x << ", " << command.angular.y << ", " << command.angular.z << "]"; - + auto command_stop_time = std::chrono::high_resolution_clock::now(); + auto duration_command = std::chrono::duration_cast(command_stop_time 
- command_start_time); + CLOG(ERROR, "mpc.cbit") << "ComputeCommand took: " << duration_command.count() << "ms"; return command; } // Otherwise stop the robot diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp index dcf779f92..c572fa2d9 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -32,6 +32,46 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, std::vector barrier_q_left, std::vector barrier_q_right, int K, double DT, double VF, Eigen::Matrix lat_noise_vect, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization, double pose_error_weight, double vel_error_weight, double acc_error_weight, double kin_error_weight, double lat_error_weight) { + // Pre-process the measurements to correct the cbit path yaw values depending on if the robot should be moving in forward or reverse + //Eigen::Matrix T_ROT_YAW; + //T_ROT_YAW << -1, 0, 0, 0, + // 0, -1, 0, 0, + // 0, 0, 1, 0, + // 0, 0, 0, 1; + //lgmath::se3::Transformation T_ROT_YAW2(T_ROT_YAW); + //CLOG(ERROR, "mpc_debug.cbit") << "THE PI ROTATION MATRIX IS: " << T_ROT_YAW2; + + //for (int i = 0; i < measurements_cbit.size(); i++) + //{ + // auto test1 = measurements[i].matrix().block(0, 0, 3, 3); + // auto test2 = measurements_cbit[i].matrix().block<3, 1>(0, 3); + // CLOG(ERROR, "mpc_debug.cbit") << "TEST2 IS: " << test2; + // auto cbit_pose = measurements_cbit[i].matrix().block(0, 0, 3, 3); + // auto teach_pose = measurements[i].matrix().block(0, 0, 3, 3); + // CLOG(ERROR, "mpc_debug.cbit") << "THE CBIT MATRIX IS: " << measurements_cbit[i]; + // CLOG(ERROR, "mpc_debug.cbit") << "THE TEACH MATRIX IS: " << measurements[i]; + //measurements_cbit[i] = measurements[i]; 
+ // lgmath::se3::Transformation new_meas_cbit(test1,test2); + // measurements_cbit[i] = new_meas_cbit; + + // CLOG(ERROR, "mpc_debug.cbit") << "THE ROTATED CBIT MATRIX IS: " << measurements_cbit[i]; + //auto rel_pose = teach_pose.transpose() * cbit_pose; + //CLOG(ERROR, "mpc_debug.cbit") << "THE CBIT ROTATION MATRIX IS: " << cbit_pose; + //CLOG(ERROR, "mpc_debug.cbit") << "THE TEACH ROTATION MATRIX IS: " << teach_pose; + //CLOG(ERROR, "mpc_debug.cbit") << "THE RELATIVE ROTATION MATRIX IS: " << teach_pose.transpose() * cbit_pose; + //double relative_yaw = atan2(rel_pose(1,0),rel_pose(0,0)); + //CLOG(ERROR, "mpc_debug.cbit") << "THE RELATIVE YAW IS: " << relative_yaw; + //if (fabs(relative_yaw) >= 1.57075) + //{ + // measurements_cbit[i] = measurements_cbit[i] * T_ROT_YAW2; + // CLOG(ERROR, "mpc_debug.cbit") << "CORRECTED THE YAW"; + //} + //CLOG(ERROR, "mpc_debug.cbit") << "THE ROTATED CBIT MATRIX IS: " << measurements_cbit[i]; + //} + // End of pre-pre-processing measurement rotations + + + // Conduct an MPC Iteration given current configurations // Velocity set-points (desired forward velocity and angular velocity), here we set a static forward target velocity, and try to minimize rotations (0rad/sec) @@ -124,7 +164,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Setup the optimization problem steam::OptimizationProblem opt_problem; - for (int i=0; i previous_vel, lgmath::se for (int i = 0; i < K; i++) { // Pose Error - const auto pose_error_func = steam::se3::SE3ErrorEvaluator::MakeShared(pose_state_vars[i], measurements[i]); - const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, poseLossFunc); - opt_problem.addCostTerm(pose_cost_term); + if (i > 0) + { + const auto pose_error_func = steam::se3::SE3ErrorEvaluator::MakeShared(pose_state_vars[i], measurements[i]); + const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, 
poseLossFunc); + opt_problem.addCostTerm(pose_cost_term); + } // Kinematic constraints (softened but penalized heavily) if (i < (K-1)) @@ -155,17 +198,19 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se opt_problem.addCostTerm(kin_cost_term); // Non-Zero Velocity Penalty (penalty of non resting control effort helps with point stabilization) - const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, velLossFunc); - opt_problem.addCostTerm(vel_cost_term); + //const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, velLossFunc); + //opt_problem.addCostTerm(vel_cost_term); // Velocity set-point constraint - No longer using this due to complications when repeating a path in reverse // Only add this cost term if we are not in point stabilization mode (end of path) - //if (point_stabilization == false) - //{ - // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, velLossFunc); - // opt_problem.addCostTerm(vel_cost_term); - //} - + + if (point_stabilization == false) + { + const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, velLossFunc); + opt_problem.addCostTerm(vel_cost_term); + } + + // Acceleration Constraints if (i == 0) { @@ -180,19 +225,19 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, accelLossFunc); opt_problem.addCostTerm(accel_cost_term); } - + } // Laterial Barrier State Constraints - if (i > 0) + if (i >= 0) { // Generate a locked transform evaluator to store the current measurement for state constraints // The reason we make it a variable and lock it is so we can 
use the built in steam evaluators which require evaluable inputs measurement_vars.push_back(steam::se3::SE3StateVar::MakeShared(measurements[i])); - measurement_vars[i-1]->locked() = true; + measurement_vars[i]->locked() = true; // Take the compose inverse of the locked measurement w.r.t the state transforms - const auto compose_inv = steam::se3::ComposeInverseEvaluator::MakeShared(measurement_vars[i-1], pose_state_vars[i]); + const auto compose_inv = steam::se3::ComposeInverseEvaluator::MakeShared(measurement_vars[i], pose_state_vars[i]); // Use the ComposeLandmarkEvaluator to right multiply the 4th column of the identity matrix to create a 4x1 homogenous point vector with lat,lon,alt error components const auto error_vec = steam::stereo::ComposeLandmarkEvaluator::MakeShared(compose_inv, I_4_eval); @@ -239,14 +284,26 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Solve the optimization problem with GuassNewton solver //using SolverType = steam::GaussNewtonSolver; // Old solver, does not have back stepping capability using SolverType = steam::LineSearchGaussNewtonSolver; + //using SolverType = steam::DoglegGaussNewtonSolver; // Initialize solver parameters SolverType::Params params; - params.verbose = false; // Makes the output display for debug when true + params.verbose = true; // Makes the output display for debug when true params.relative_cost_change_threshold = 1e-6; - params.max_iterations = 200; + params.max_iterations = 100; params.absolute_cost_change_threshold = 1e-6; - params.backtrack_multiplier = 0.5; // Line Search Specifc Params, will fail to build if using GaussNewtonSolver - params.max_backtrack_steps = 1000; // Line Search Specifc Params, will fail to build if using GaussNewtonSolver + params.backtrack_multiplier = 0.5; // Line Search Specific Params, will fail to build if using GaussNewtonSolver + params.max_backtrack_steps = 200; // Line Search Specific Params, will fail to build if using GaussNewtonSolver + 
//params.ratio_threshold_shrink = 0.1; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + /// Grow trust region if ratio of actual to predicted cost reduction above + /// this (range: 0.0-1.0) + //params.ratio_threshold_grow = 0.9; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + /// Amount to shrink by (range: <1.0) + //params.shrink_coeff = 0.9; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + /// Amount to grow by (range: >1.0) + //params.grow_coeff = 1.1; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + /// Maximum number of times to shrink trust region before giving up + //params.max_shrink_steps = 200; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + SolverType solver(opt_problem, params); // Solve the optimization problem @@ -377,6 +434,9 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state std::vector p_meas_vec; std::vector measurements; + std::vector p_interp_vec; + std::vector q_interp_vec; + p_meas_vec.reserve(K); for (int i = 0; i < K; i++) { @@ -403,13 +463,20 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi point_stabilization = true; // Enable point stabilization configs CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; } - lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); + struct interp_result meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); // add to measurement vector - measurements.push_back(meas); + measurements.push_back(meas.measurement); + p_interp_vec.push_back(meas.p_interp); + q_interp_vec.push_back(meas.q_interp); + } - return {measurements, point_stabilization}; + //CLOG(WARNING, "mpc.cbit") << "The Approximate Robot State P value is (ref 
meas): " << p_correction; + //CLOG(WARNING, "mpc.cbit") << "The p_interp_vec is: " << p_interp_vec; + //CLOG(WARNING, "mpc.cbit") << "The q_interp_vec is: " << q_interp_vec; + + return {measurements, point_stabilization, p_interp_vec, q_interp_vec}; } // For generating VT&R teach path measurements used in the corridor mpc @@ -437,7 +504,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path //CLOG(DEBUG, "mpc_debug.cbit") << "The global reference p is: " << sid_p; // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot - CLOG(WARNING, "corridor_mpc_debug.cbit") << "The size of the teach_path is: " << teach_path.size(); + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "The size of the teach_path is: " << teach_path.size(); for (int i = (current_sid-1); i <= teach_path.size(); i++) { cbit_path.push_back(teach_path[i]); @@ -491,6 +558,8 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path p_robot = 0.0; } + CLOG(WARNING, "mpc_debug.cbit") << "The p value for the robot is: " << p_robot + sid_p; + // PSEUDO CODE: // 1. Find the closest point on the cbit path to the current state of the robot // 2. 
Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) @@ -544,7 +613,6 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path { p_meas_vec.push_back((i * DT * VF) + p_robot); } - //CLOG(WARNING, "mpc_debug.cbit") << "p_meas_vec is: " << p_meas_vec; // Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values // Note this could probably be combined in the previous loop too @@ -565,11 +633,11 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; } - lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); + struct interp_result meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; // add to measurement vector - measurements.push_back(meas); + measurements.push_back(meas.measurement); // Find the corresponding left and right barrier q values to pass to the mpc @@ -592,11 +660,60 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path return {measurements, point_stabilization, barrier_q_left, barrier_q_right}; } +// For generating VT&R teach path measurements used in the corridor mpc (new version which directly uses the interpolated p measurements from the cbit path trajectory tracking) +struct meas_result3 GenerateReferenceMeas4(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid, std::vector p_interp_vec) +{ + // Set point stabilization, but just note if we use this function in the cbit.cpp file we need to use the Tracking reference pose point stabilization instead + bool point_stabilization = false; + // Initialize vectors storing the barrier values: + std::vector barrier_q_left; + 
std::vector barrier_q_right; + + // load the teach path + std::vector teach_path = global_path_ptr->disc_path; + std::vector teach_path_p = global_path_ptr->p; + + + std::vector measurements; + + //CLOG(ERROR, "mpc_debug.cbit") << "The p_interp_vec is: " << p_interp_vec; + //CLOG(ERROR, "mpc_debug.cbit") << "The Global Path p is: " << teach_path_p; + + // Iterate through the interpolated p_measurements and make interpolate euclidean measurements from the teach path + for (int i = 0; i < p_interp_vec.size(); i++) + { + + struct interp_result meas = InterpolateMeas2(p_interp_vec[i], teach_path_p, teach_path); + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; + + // add to measurement vector + measurements.push_back(meas.measurement); + + + // Find the corresponding left and right barrier q values to pass to the mpc + + // The corridor_ptr points to the stored barrier values for the entire teach trajectory (0,p_len) + // To find the corresponding values, we just need to query the corridor_ptr given the current sid_p + p_meas_vec[i], and return the q values for that bin + double p_query = p_interp_vec[i]; + // this isnt the most efficient way of doing this, but it should be fine, we really only need to run this loop 10-20 times and the size is likely less then 1000 each + int p_ind = 0; + while (corridor_ptr->p_bins[p_ind] <= p_query) + { + p_ind++; + } + barrier_q_left.push_back(corridor_ptr->q_left[p_ind-1]); + barrier_q_right.push_back(corridor_ptr->q_right[p_ind-1]); + //CLOG(WARNING, "mpc_debug.cbit") << "The left barrier is: " << corridor_ptr->q_left[p_ind-1]; + //CLOG(WARNING, "mpc_debug.cbit") << "The right barrier is: " << corridor_ptr->q_right[p_ind-1]; + } + + return {measurements, point_stabilization, barrier_q_left, barrier_q_right}; +} // function takes in the cbit path solution with a vector defining the p axis of the path, and then a desired p_meas // Then tries to output a euclidean pose interpolated for the desired 
p_meas. -lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) +struct interp_result InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) { // Find the lower bound of the p values for (int i = 0; i < cbit_p.size(); i++) @@ -612,16 +729,38 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c Pose pose_lower = cbit_path[i-1]; Pose pose_upper = cbit_path[i]; - double x_int = pose_lower.x + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .x - pose_lower.x); - double y_int = pose_lower.y + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .y - pose_lower.y); - double z_int = pose_lower.z + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .z - pose_lower.z); + + double x_int = pose_lower.x + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.x - pose_lower.x); + double y_int = pose_lower.y + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.y - pose_lower.y); + double z_int = pose_lower.z + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.z - pose_lower.z); // For yaw we need to be abit careful about sign and angle wrap around // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts // TODO: There is a problem here for reverse planning, will need to rotate the yaw 180 degrees in that case. 
// For normal forward planning this is fine though - double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); - //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw Is: " << yaw_int; + + // This interpolation if we do have yaw available (when the input path is the teach path as it is for corridor mpc) + double yaw_int; + //yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); + if ((pose_lower.yaw == 0.0) && (pose_upper.yaw == 0.0)) + { + // Yaw interpolation when we dont have yaw available explicitly (i.e from cbit path euclid conversion) + yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); + //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw For CBIT PATH is: " << yaw_int; + } + else + { + // if this is used on the cbit path, it will just return 0.0 every time (note this will break tracking control if yaw covariance set low) + //yaw_int = pose_lower.yaw + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.yaw - pose_lower.yaw); + yaw_int = pose_lower.yaw; + //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw For TEACH PATH is: " << yaw_int; + } + + + // we also want to interpolate p and q values based on the original p,q from the cbit_path. 
We use this afterwards for finding appropriate corridor mpc + // reference poses on the teach path + double p_int = pose_lower.p + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.p - pose_lower.p); + double q_int = pose_lower.q + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.q - pose_lower.q); // Build the transformation matrix Eigen::Matrix4d T_ref; @@ -634,7 +773,8 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c lgmath::se3::Transformation meas = lgmath::se3::Transformation(T_ref); CLOG(DEBUG, "mpc_debug.cbit") << "The measurement Euclidean state is - x: " << x_int << " y: " << y_int << " z: " << z_int << " yaw: " << yaw_int; - return meas; + CLOG(DEBUG, "mpc_debug.cbit") << "The measurement P,Q value is - p: " << p_int << " q: " << q_int; + return {meas, p_int, q_int}; } } } diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp index 39d5c2b55..40d1f4d5e 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp @@ -34,6 +34,7 @@ #include "nav_msgs/msg/path.hpp" #include "tf2_ros/transform_broadcaster.h" #include "std_msgs/msg/string.hpp" +#include "geometry_msgs/msg/pose_array.hpp" #include "vtr_path_planning/cbit/cbit_config.hpp" #include "vtr_path_planning/base_path_planner.hpp" @@ -143,8 +144,17 @@ class CBIT : public BasePathPlanner { void initializeRoute(RobotState& robot_state) override; // Declare this as virtual so that the Lidarcbit can override it if using that static name //void initializeRouteTest(RobotState& robot_state) override; Command computeCommand(RobotState& robot_state) override; - void visualize(const tactic::Timestamp& stamp, const tactic::EdgeTransform& T_w_p,const tactic::EdgeTransform& T_p_r, const tactic::EdgeTransform& T_p_r_extp, const tactic::EdgeTransform& T_p_r_extp_mpc, std::vector mpc_prediction, 
std::vector robot_prediction); - + void visualize(const tactic::Timestamp& stamp, const tactic::EdgeTransform& T_w_p,const tactic::EdgeTransform& T_p_r, const tactic::EdgeTransform& T_p_r_extp, const tactic::EdgeTransform& T_p_r_extp_mpc, std::vector mpc_prediction, std::vector robot_prediction, std::vector ref_pose_vec1, std::vector ref_pose_vec2); + Node curve_to_euclid(Node node); + Pose lin_interpolate(int p_ind, double p_val); + bool costmap_col_tight(Node node); + struct collision_result + { + bool bool_result; + Node col_node; + }; + struct collision_result discrete_collision_v2(double discretization, Node start, Node end); + protected: struct ChainInfo { tactic::Timestamp stamp; @@ -171,6 +181,8 @@ class CBIT : public BasePathPlanner { rclcpp::Publisher::SharedPtr path_pub_; rclcpp::Publisher::SharedPtr corridor_pub_l_; rclcpp::Publisher::SharedPtr corridor_pub_r_; + rclcpp::Publisher::SharedPtr ref_pose_pub1_; + rclcpp::Publisher::SharedPtr ref_pose_pub2_; // Pointers to the output path std::vector cbit_path; diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp index a2d109bd4..e8e8607aa 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/generate_pq.hpp @@ -40,7 +40,7 @@ class CBITPath { std::vector sid; // store the sid value of the transform from the teach path CBITPath(CBITConfig config, std::vector initial_path); // constructor; need to feed this the path CBITPath() = default; - + Pose interp_pose(double p_in); // Function to interpolate a pose from the teach path given a p value double delta_p_calc(Pose start_pose, Pose end_pose, double alpha); // Function for computing delta p intervals in p,q space // Internal function declarations diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp 
b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp index e46d259ff..40c6fb84a 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/utils.hpp @@ -38,6 +38,8 @@ class Pose { double roll; double pitch; double yaw; + double p; + double q; Pose* parent = nullptr; // Use a pointer to a Pose class to keep track of a parent Pose(double x_in, double y_in, double z_in, double roll_in, double pitch_in, double yaw_in) // Pose constructor diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp index ce84a3571..1b4d5632e 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/mpc_path_planner2.hpp @@ -32,6 +32,8 @@ struct meas_result { std::vector measurements; bool point_stabilization; + std::vector p_interp_vec; + std::vector q_interp_vec; }; struct meas_result3 @@ -42,6 +44,14 @@ struct meas_result3 std::vector barrier_q_right; }; +struct interp_result +{ + lgmath::se3::Transformation measurement; + double p_interp; + double q_interp; +}; + + // Declaring helper functions // Primary optimization function: Takes in the input configurations and the extrapolated robot pose, outputs a vector for the velocity to apply and the predicted horizon @@ -51,11 +61,14 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbit_path_ptr, std::tuple robot_pose, int K, double DT, double VF); // Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity -struct meas_result3 GenerateReferenceMeas3(std::shared_ptr cbit_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int 
current_sid); +struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid); + +// Helper function for generating reference measurements poses from a discrete path to use for tracking the path at a desired forward velocity +struct meas_result3 GenerateReferenceMeas4(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid, std::vector p_interp_vec); // Helper function for post-processing and saturating the velocity command Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel, double v_lim, double w_lim); // Helper function in Generate Reference Meas which interpolates a Transformation measurement gen the cbit_path and the desired measurements p value along the path -lgmath::se3::Transformation InterpolateMeas2(double p_meas, std::vector cbit_p, std::vector cbit_path); \ No newline at end of file +struct interp_result InterpolateMeas2(double p_meas, std::vector cbit_p, std::vector cbit_path); \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 514b31b90..bb0757843 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -132,6 +132,8 @@ CBIT::CBIT(const Config::ConstPtr& config, path_pub_ = node->create_publisher("planning_path", 10); corridor_pub_l_ = node->create_publisher("corridor_path_left", 10); corridor_pub_r_ = node->create_publisher("corridor_path_right", 10); + ref_pose_pub1_ = node->create_publisher("mpc_ref_pose_array1", 10); + ref_pose_pub2_ = node->create_publisher("mpc_ref_pose_array2", 10); // Updating cbit_configs // Environment @@ -238,6 +240,7 @@ void CBIT::initializeRoute(RobotState& robot_state) { se3_vector = T2xyzrpy(teach_frame); se3_pose = Pose(std::get<0>(se3_vector), std::get<1>(se3_vector), std::get<2>(se3_vector), 
std::get<3>(se3_vector), std::get<4>(se3_vector), std::get<5>(se3_vector)); euclid_path_vec.push_back(se3_pose); + //CLOG(ERROR, "path_planning.cbit") << "Global Path Yaw values: " << i << ": " << std::get<5>(se3_vector); } @@ -248,7 +251,7 @@ void CBIT::initializeRoute(RobotState& robot_state) { auto world_frame_pose = T2xyzrpy(T_w_p * T_p_r); auto test1 = euclid_path_vec[0]; auto test2 = euclid_path_vec[1]; - auto path_yaw = std::atan2((test2.y-test1.y),(test2.x-test1.y)); + auto path_yaw = std::atan2((test2.y-test1.y),(test2.x-test1.x)); auto pose_graph_yaw = std::get<5>(world_frame_pose); CLOG(INFO, "path_planning.cbit") << "The path_yaw is: " << path_yaw; CLOG(INFO, "path_planning.cbit") << "The pose_graph yaw is: " << pose_graph_yaw; @@ -439,6 +442,19 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { std::vector barrier_q_right = meas_result3.barrier_q_right; // END of experimental code + std::vector ref_pose_vec1; + for (int i = 0; i ref_pose_vec2; + for (int i = 0; i mpc_poses; @@ -476,12 +492,12 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { robot_poses.push_back(T_w_p * T_p_r); // Send the robot poses and mpc prediction to rviz - visualize(stamp, T_w_p, T_p_r, T_p_r_extp, T_p_r_extp2, mpc_poses, robot_poses); + visualize(stamp, T_w_p, T_p_r, T_p_r_extp, T_p_r_extp2, mpc_poses, robot_poses, ref_pose_vec1, ref_pose_vec2); // return the computed velocity command for the first time step Command command; command.linear.x = saturated_vel(0) * 1.1; // * 1.1 Added to compensate for bad grizzly internal controller config - command.angular.z = saturated_vel(1) * 1.1;; + command.angular.z = saturated_vel(1); // Temporary modification by Jordy to test calibration of hte grizzly controller CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); @@ -524,7 +540,7 @@ auto CBIT::getChainInfo(RobotState& robot_state) -> ChainInfo { } // Visualizing robot pose and the planned paths in rviz -void 
CBIT::visualize(const tactic::Timestamp& stamp, const tactic::EdgeTransform& T_w_p, const tactic::EdgeTransform& T_p_r, const tactic::EdgeTransform& T_p_r_extp, const tactic::EdgeTransform& T_p_r_extp_mpc, std::vector mpc_prediction, std::vector robot_prediction) +void CBIT::visualize(const tactic::Timestamp& stamp, const tactic::EdgeTransform& T_w_p, const tactic::EdgeTransform& T_p_r, const tactic::EdgeTransform& T_p_r_extp, const tactic::EdgeTransform& T_p_r_extp_mpc, std::vector mpc_prediction, std::vector robot_prediction, std::vector ref_pose_vec1, std::vector ref_pose_vec2) { /// Publish the current frame for planning { @@ -671,8 +687,194 @@ void CBIT::visualize(const tactic::Timestamp& stamp, const tactic::EdgeTransform corridor_pub_l_->publish(corridor_left); corridor_pub_r_->publish(corridor_right); } + + // Attempting to Publish the reference poses used in the mpc optimization as a pose array + { + // create a PoseArray message + geometry_msgs::msg::PoseArray pose_array_msg; + pose_array_msg.header.frame_id = "world"; + + // fill the PoseArray with some sample poses + for (int i = 0; i < ref_pose_vec1.size(); i++) { + geometry_msgs::msg::Pose pose; + auto T1 = ref_pose_vec1[i].matrix(); + pose.position.x = T1(0,3); + pose.position.y = T1(1,3);; + pose.position.z = T1(2,3);; + pose.orientation.w = 1.0; + pose_array_msg.poses.push_back(pose); + } + ref_pose_pub1_->publish(pose_array_msg); + } + + // Attempting to Publish the reference poses used in the mpc optimization as a pose array + { + // create a PoseArray message + geometry_msgs::msg::PoseArray pose_array_msg; + pose_array_msg.header.frame_id = "world"; + + // fill the PoseArray with some sample poses + for (int i = 0; i < ref_pose_vec2.size(); i++) { + geometry_msgs::msg::Pose pose; + auto T2 = ref_pose_vec2[i].matrix(); + pose.position.x = T2(0,3); + pose.position.y = T2(1,3);; + pose.position.z = T2(2,3);; + pose.orientation.w = 1.0; + pose_array_msg.poses.push_back(pose); + } + 
ref_pose_pub2_->publish(pose_array_msg); + } return; } + +// Function for converting a p,q coordinate value into a euclidean coordinate using the pre-processed path to follow +Node CBIT::curve_to_euclid(Node node) +{ + double p_val = node.p; + double q_val = node.q; + int p_ind = bisection(global_path_ptr->p, p_val); + + // Linearly interpolate a Euclidean Pose using the euclidean path and the relative p_val,q_val + // TODO: need to use steam or lgmath se(3) classes for these poses, for now just using a vector + Pose pose_c = lin_interpolate(p_ind, p_val); + + double x_i = pose_c.x - sin(pose_c.yaw)*q_val; + double y_i = pose_c.y + cos(pose_c.yaw)*q_val; + + // Experimental, also interpolate the z value + double z_i = pose_c.z; //+ cos(pose_c.yaw)*q_val; // I think here we just want to set the z to whatever the pose_c.z value + + return Node(x_i,y_i,z_i); +} + +Pose CBIT::lin_interpolate(int p_ind, double p_val) +{ + double p_max = global_path_ptr->p[(global_path_ptr->p.size() - 1)]; //TODO: Replace this with se(3) + double p_lower; + double p_upper; + if (p_val >= p_max) // if p_val is exactly the max (goal p) then return the final euclid pose + { + return Pose(global_path_ptr->path[(global_path_ptr->path.size() - 1)]); + } + + else + { + p_upper = global_path_ptr->p[p_ind + 1]; + p_lower = global_path_ptr->p[p_ind]; + } + + Pose start_pose = global_path_ptr->path[p_ind]; + Pose end_pose = global_path_ptr->path[p_ind + 1]; + + double x_c = start_pose.x + ((p_val - p_lower) / (p_upper - p_lower)) * (end_pose.x - start_pose.x); + double y_c = start_pose.y + ((p_val - p_lower) / (p_upper - p_lower)) * (end_pose.y - start_pose.y); + double z_c = start_pose.z + ((p_val - p_lower) / (p_upper - p_lower)) * (end_pose.z - start_pose.z); + + // For angles, we dont really care about roll and pitch, these can be left 0 (atleast for now) + // For yaw need to be very careful of angle wrap around problem: + double angle_difference = std::fmod((std::fmod((end_pose.yaw - 
start_pose.yaw),(2.0*M_PI)) + (3.0*M_PI)),(2.0*M_PI)) - M_PI; // CHECK THIS! + double yaw_c = start_pose.yaw + ((p_val - p_lower) / (p_upper - p_lower)) * angle_difference; + + return Pose({x_c, y_c, z_c, 0.0, 0.0, yaw_c}); + +} + + +struct CBIT::collision_result CBIT::discrete_collision_v2(double discretization, Node start, Node end) +{ + // We dynamically determine the discretization based on the length of the edge + discretization = ceil(calc_dist(start, end) * discretization); + + // Generate discretized test nodes + std::vector p_test; + std::vector q_test; + + double p_step = fabs(end.p - start.p) / discretization; + double q_step = fabs(end.q - start.q) / discretization; + + p_test.push_back(start.p); + q_test.push_back(start.q); + + for (int i = 0; i < discretization-1; i++) + { + p_test.push_back(p_test[i] + p_step*sgn(end.p-start.p) ); + q_test.push_back(q_test[i] + q_step*sgn(end.q-start.q) ); + } + p_test.push_back(end.p); + q_test.push_back(end.q); + + + + // Loop through the test curvilinear points, convert to euclid, collision check obstacles + Node curv_pt; + for (int i = 0; i < p_test.size(); i++) + { + curv_pt = Node(p_test[i], q_test[i]); + + // Convert to euclid TODO: + //Node euclid_pt = curv_pt; // DEBUG DO NOT LEAVE THIS HERE, NEED TO REPLACE WITH COLLISION CHECK FUNCTION + Node euclid_pt = curve_to_euclid(curv_pt); + + if (costmap_col_tight(euclid_pt)) + { + return {true, curv_pt}; + } + + + } + + return {false, curv_pt}; +} + + +// This collision check is only used at the end of each batch and determines whether the path should be rewired using the bare minimum obstacle distance +// Under normal operation we plan paths around a slightly more conservative buffer around each obstacle (equal to influence dist + min dist) +bool CBIT::costmap_col_tight(Node node) +{ + //CLOG(DEBUG, "path_planning.cbit_planner") << "Original Node: x: " << node.p << " y: " << node.q << " z: " << node.z; + Eigen::Matrix test_pt({node.p, node.q, node.z, 1}); + + // 
I am no longer temporally filtering here, instead this takes place in the costmap itself in the change detection module + // This means the costmap and transform size should never be more than 1 + //for (int i = 0; i < cbit_costmap_ptr->T_c_w_vect.size(); i++) + //{ + + + auto collision_pt = costmap_ptr->T_c_w * test_pt; + + // Round the collision point x and y values down to the nearest grid resolution so that it can be found in the obstacle unordered_map + float x_key = floor(collision_pt[0] / costmap_ptr->grid_resolution) * costmap_ptr->grid_resolution; + float y_key = floor(collision_pt[1] / costmap_ptr->grid_resolution) * costmap_ptr->grid_resolution; + + //CLOG(DEBUG, "path_planning.cbit_planner") << "X_key: " << x_key; + //CLOG(DEBUG, "path_planning.cbit_planner") << "Y_key: " << y_key; + + float grid_value; + + // Check to see if the point is in the obstacle map + // We need to use a try/catch in this metod as if the key value pair doesnt exist (it usually wont) we catch an out of range error + try + { + // Block of code to try + grid_value = costmap_ptr->obs_map.at(std::pair (x_key, y_key)); + //CLOG(ERROR, "path_planning.cbit_planner") << "Key Value: " << grid_value; + } + catch (std::out_of_range) + { + grid_value = 0.0; + } + + if (grid_value >= 0.89) // By switching this from > 0.0 to 0.9, we effectively only collision check the path out to the "minimum_distance" obs config param + { + return true; + } + //} + + // If we make it here can return false for no collision + return false; +} + } // namespace path_planning } // namespace vtr \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index 8748137c3..a476625fb 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -649,7 +649,7 @@ void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robo // 
EXPERIMENTAL: Updating the dynamic corridor (now that we know our path is collision free): //auto corridor_start_time = std::chrono::high_resolution_clock::now(); - update_corridor(corridor_ptr, path_x, path_y, *p_goal); + //update_corridor(corridor_ptr, path_x, path_y, *p_goal); //auto corridor_stop_time = std::chrono::high_resolution_clock::now(); //auto duration_corridor = std::chrono::duration_cast(corridor_stop_time - corridor_start_time); //CLOG(ERROR, "path_planning.cbit_planner") << "Corridor Update Time: " << duration_corridor.count() << "us"; @@ -1685,6 +1685,14 @@ std::vector CBITPlanner::ExtractEuclidPath() // Experimental std::vector path_z; + std::vector path_yaw; + std::vector path_p; + std::vector path_q; + + // experimental, for grabing the yaw value from the teach path + std::vector p = global_path->p; + std::vector disc_path = global_path->disc_path; + // End of experimental std::vector node_list = {node}; node_list.reserve(1000); // Maybe dynamically allocate this size based on path length/discretization param @@ -1731,8 +1739,23 @@ std::vector CBITPlanner::ExtractEuclidPath() path_x.push_back(euclid_pt.p); path_y.push_back(euclid_pt.q); path_z.push_back(euclid_pt.z); + + // Experimental yaw estimation from teach path + int p_iter = 0; + while (p_disc[i] > p[p_iter]) + { + p_iter = p_iter+1; + } + path_yaw.push_back(disc_path[p_iter-1].yaw); + // End Experimental + + + //For debug //path_z.push_back(0.0); + // Also adding the p,q to the euclidean path for reference + path_p.push_back(p_disc[i]); + path_q.push_back(q_disc[i]); } @@ -1745,7 +1768,10 @@ std::vector CBITPlanner::ExtractEuclidPath() std::vector pose_vector; for (int i = 0; i initial_path) } +// note made this pretty quick and dirty, need to refine with interpolation next +Pose CBITPath::interp_pose(double p_in) +{ + int p_iter = 0; + while (p_in > this->p[p_iter]) + { + p_iter = p_iter+1; + } + Pose interpolated_pose(this->disc_path[p_iter].x, this->disc_path[p_iter].y, 
this->disc_path[p_iter].z, this->disc_path[p_iter].roll, this->disc_path[p_iter].pitch, this->disc_path[p_iter].yaw); + return interpolated_pose; +} + // Calculating the distance between se(3) poses including a heading contribution double CBITPath::delta_p_calc(Pose start_pose, Pose end_pose, double alpha) { diff --git a/main/src/vtr_path_planning/src/cbit/utils.cpp b/main/src/vtr_path_planning/src/cbit/utils.cpp index fea689a3a..b5d68aa0d 100644 --- a/main/src/vtr_path_planning/src/cbit/utils.cpp +++ b/main/src/vtr_path_planning/src/cbit/utils.cpp @@ -235,6 +235,62 @@ Node curve_to_euclid(Node node, std::vector<) } */ + + +/* +// Function for converting a p,q coordinate value into a euclidean coordinate using the pre-processed path to follow +Node curve_to_euclid2(Node node, std::shared_ptr global_path_ptr;) +{ + double p_val = node.p; + double q_val = node.q; + int p_ind = bisection(global_path->p, p_val); + + // Linearly interpolate a Euclidean Pose using the euclidean path and the relative p_val,q_val + // TODO: need to use steam or lgmath se(3) classes for these poses, for now just using a vector + Pose pose_c = lin_interpolate(p_ind, p_val); + + double x_i = pose_c.x - sin(pose_c.yaw)*q_val; + double y_i = pose_c.y + cos(pose_c.yaw)*q_val; + + // Experimental, also interpolate the z value + double z_i = pose_c.z; //+ cos(pose_c.yaw)*q_val; // I think here we just want to set the z to whatever the pose_c.z value + + return Node(x_i,y_i,z_i); +} + +Pose lin_interpolate2(int p_ind, double p_val) +{ + double p_max = global_path->p[(global_path->p.size() - 1)]; //TODO: Replace this with se(3) + double p_lower; + double p_upper; + if (p_val >= p_max) // if p_val is exactly the max (goal p) then return the final euclid pose + { + return Pose(global_path->path[(global_path->path.size() - 1)]); + } + + else + { + p_upper = global_path->p[p_ind + 1]; + p_lower = global_path->p[p_ind]; + } + + Pose start_pose = global_path->path[p_ind]; + Pose end_pose = 
global_path->path[p_ind + 1]; + + double x_c = start_pose.x + ((p_val - p_lower) / (p_upper - p_lower)) * (end_pose.x - start_pose.x); + double y_c = start_pose.y + ((p_val - p_lower) / (p_upper - p_lower)) * (end_pose.y - start_pose.y); + double z_c = start_pose.z + ((p_val - p_lower) / (p_upper - p_lower)) * (end_pose.z - start_pose.z); + + // For angles, we dont really care about roll and pitch, these can be left 0 (atleast for now) + // For yaw need to be very careful of angle wrap around problem: + double angle_difference = std::fmod((std::fmod((end_pose.yaw - start_pose.yaw),(2.0*M_PI)) + (3.0*M_PI)),(2.0*M_PI)) - M_PI; // CHECK THIS! + double yaw_c = start_pose.yaw + ((p_val - p_lower) / (p_upper - p_lower)) * angle_difference; + + return Pose({x_c, y_c, z_c, 0.0, 0.0, yaw_c}); + +} +*/ + // Function for quickly finding the index j for the value in a sorted vector which is immediately below the function value. // in curve_to_euclid we use this function to efficiently find the index of the euclidean se(3) pose stored in the discrete path which // immediately preceders the current p,q point we are trying to convert back to euclidean. 
diff --git a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp index af4e41b08..a857a026a 100644 --- a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp @@ -163,7 +163,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Only add this cost term if we are not in point stabilization mode (end of path) //if (point_stabilization == false) //{ - // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, sharedLossFunc); + // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, velLossFunc); // opt_problem.addCostTerm(vel_cost_term); //} @@ -329,6 +329,7 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi // Save a copy of the current path solution to work on auto cbit_path = *cbit_path_ptr; + // PSEUDO CODE: // 1. Find the closest point on the cbit path to the current state of the robot // 2. 
Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) @@ -378,6 +379,9 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi // Determine the p_values we need for our measurement horizon, corrected for the p value of the closest point on the path to the current robot state std::vector p_meas_vec; std::vector measurements; + std::vector p_interp_vec; + std::vector q_interp_vec; + p_meas_vec.reserve(K); for (int i = 0; i < K; i++) { @@ -404,13 +408,20 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi point_stabilization = true; // Enable point stabilization configs CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; } - lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); + struct interp_result meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); // add to measurement vector - measurements.push_back(meas); + measurements.push_back(meas.measurement); + p_interp_vec.push_back(meas.p_interp); + q_interp_vec.push_back(meas.q_interp); + } - return {measurements, point_stabilization}; + //CLOG(WARNING, "mpc.cbit") << "The Approximate Robot State P value is (ref meas): " << p_correction; + //CLOG(WARNING, "mpc.cbit") << "The p_interp_vec is: " << p_interp_vec; + //CLOG(WARNING, "mpc.cbit") << "The q_interp_vec is: " << q_interp_vec; + + return {measurements, point_stabilization, p_interp_vec, q_interp_vec}; } // For generating VT&R teach path measurements used in the corridor mpc @@ -438,7 +449,7 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path //CLOG(DEBUG, "mpc_debug.cbit") << "The global reference p is: " << sid_p; // I use sid -1 to be conservative, because I think its possible the robot pose is being localized in the frame ahead of the robot - CLOG(WARNING, "corridor_mpc_debug.cbit") << "The size of the teach_path is: " << teach_path.size(); + //CLOG(WARNING, 
"corridor_mpc_debug.cbit") << "The size of the teach_path is: " << teach_path.size(); for (int i = (current_sid-1); i <= teach_path.size(); i++) { cbit_path.push_back(teach_path[i]); @@ -545,7 +556,8 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path { p_meas_vec.push_back((i * DT * VF) + p_robot); } - //CLOG(WARNING, "mpc_debug.cbit") << "p_meas_vec is: " << p_meas_vec; + CLOG(ERROR, "mpc.cbit") << "The Approximate Robot State P value is (teach meas): " << p_robot; + CLOG(ERROR, "mpc_debug.cbit") << "p_meas_vec is (teach path): " << p_meas_vec; // Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values // Note this could probably be combined in the previous loop too @@ -566,11 +578,11 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path CLOG(INFO, "mpc.cbit") << "Approaching End of Path, Converting MPC to Point Stabilization Problem"; } - lgmath::se3::Transformation meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); + struct interp_result meas = InterpolateMeas2(p_meas_vec[i], cbit_p, cbit_path); //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; // add to measurement vector - measurements.push_back(meas); + measurements.push_back(meas.measurement); // Find the corresponding left and right barrier q values to pass to the mpc @@ -593,11 +605,57 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path return {measurements, point_stabilization, barrier_q_left, barrier_q_right}; } +// For generating VT&R teach path measurements used in the corridor mpc (new version which directly uses the interpolated p measurements from the cbit path trajectory tracking) +struct meas_result3 GenerateReferenceMeas4(std::shared_ptr global_path_ptr, std::shared_ptr corridor_ptr, std::tuple robot_pose, int K, double DT, double VF, int current_sid, std::vector p_interp_vec) +{ + // Set point stabilization, but just note if 
we use this function in the cbit.cpp file we need to use the Tracking reference pose point stabilization instead + bool point_stabilization = false; + + // Initialize vectors storing the barrier values: + std::vector barrier_q_left; + std::vector barrier_q_right; + + // load the teach path + std::vector teach_path = global_path_ptr->disc_path; + + + std::vector measurements; + + + // Iterate through the interpolated p_measurements and make interpolate euclidean measurements from the teach path + for (int i = 0; i < p_interp_vec.size(); i++) + { + + struct interp_result meas = InterpolateMeas2(p_interp_vec[i], p_interp_vec, teach_path); + //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; + + // add to measurement vector + measurements.push_back(meas.measurement); + + // Find the corresponding left and right barrier q values to pass to the mpc + + // The corridor_ptr points to the stored barrier values for the entire teach trajectory (0,p_len) + // To find the corresponding values, we just need to query the corridor_ptr given the current sid_p + p_meas_vec[i], and return the q values for that bin + double p_query = p_interp_vec[i]; + // this isnt the most efficient way of doing this, but it should be fine, we really only need to run this loop 10-20 times and the size is likely less then 1000 each + int p_ind = 0; + while (corridor_ptr->p_bins[p_ind] <= p_query) + { + p_ind++; + } + barrier_q_left.push_back(corridor_ptr->q_left[p_ind-1]); + barrier_q_right.push_back(corridor_ptr->q_right[p_ind-1]); + //CLOG(WARNING, "mpc_debug.cbit") << "The left barrier is: " << corridor_ptr->q_left[p_ind-1]; + //CLOG(WARNING, "mpc_debug.cbit") << "The right barrier is: " << corridor_ptr->q_right[p_ind-1]; + } + + return {measurements, point_stabilization, barrier_q_left, barrier_q_right}; +} // function takes in the cbit path solution with a vector defining the p axis of the path, and then a desired p_meas // Then tries to output a euclidean pose 
interpolated for the desired p_meas. -lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) +struct interp_result InterpolateMeas2(double p_val, std::vector cbit_p, std::vector cbit_path) { // Find the lower bound of the p values for (int i = 0; i < cbit_p.size(); i++) @@ -613,9 +671,10 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c Pose pose_lower = cbit_path[i-1]; Pose pose_upper = cbit_path[i]; - double x_int = pose_lower.x + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .x - pose_lower.x); - double y_int = pose_lower.y + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .y - pose_lower.y); - double z_int = pose_lower.z + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper .z - pose_lower.z); + + double x_int = pose_lower.x + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.x - pose_lower.x); + double y_int = pose_lower.y + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.y - pose_lower.y); + double z_int = pose_lower.z + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.z - pose_lower.z); // For yaw we need to be abit careful about sign and angle wrap around // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts @@ -624,6 +683,12 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw Is: " << yaw_int; + + // we also want to interpolate p and q values based on the original p,q from the cbit_path. 
We use this afterwards for finding appropriate corridor mpc + // reference poses on the teach path + double p_int = pose_lower.p + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.p - pose_lower.p); + double q_int = pose_lower.q + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.q - pose_lower.q); + // Build the transformation matrix Eigen::Matrix4d T_ref; T_ref << std::cos(yaw_int),-1*std::sin(yaw_int),0, x_int, @@ -635,7 +700,8 @@ lgmath::se3::Transformation InterpolateMeas2(double p_val, std::vector c lgmath::se3::Transformation meas = lgmath::se3::Transformation(T_ref); CLOG(DEBUG, "mpc_debug.cbit") << "The measurement Euclidean state is - x: " << x_int << " y: " << y_int << " z: " << z_int << " yaw: " << yaw_int; - return meas; + CLOG(DEBUG, "mpc_debug.cbit") << "The measurement P,Q value is - p: " << p_int << " q: " << q_int; + return {meas, p_int, q_int}; } } } @@ -685,6 +751,4 @@ Eigen::Matrix SaturateVel2(Eigen::Matrix applied_vel saturated_vel << command_lin_x, command_ang_z; return saturated_vel; -} - - +} \ No newline at end of file diff --git a/main/src/vtr_pose_graph/include/vtr_pose_graph/path/localization_chain.hpp b/main/src/vtr_pose_graph/include/vtr_pose_graph/path/localization_chain.hpp index c64896429..47f786562 100644 --- a/main/src/vtr_pose_graph/include/vtr_pose_graph/path/localization_chain.hpp +++ b/main/src/vtr_pose_graph/include/vtr_pose_graph/path/localization_chain.hpp @@ -113,7 +113,6 @@ class LocalizationChain : public Path { LockGuard lock(this->mutex_); return trunk_vid_; } - EdgeTransform T_leaf_petiole() const { LockGuard lock(this->mutex_); return T_leaf_petiole_; diff --git a/rviz/honeycomb.rviz b/rviz/honeycomb.rviz index fda7a56c3..96970fb19 100644 --- a/rviz/honeycomb.rviz +++ b/rviz/honeycomb.rviz @@ -20,8 +20,10 @@ Panels: - /planning curr map loc1 - /Path2 - /Corridor Path Left1/Topic1 + - /PoseArray1 + - /PoseArray2 Splitter Ratio: 0.4507042169570923 - Tree Height: 443 + Tree Height: 871 - Class: 
rviz_common/Selection Name: Selection - Class: rviz_common/Tool Properties @@ -74,31 +76,31 @@ Visualization Manager: Frames: All Enabled: false base_footprint: - Value: true + Value: false base_footprint_bumper_part: - Value: true + Value: false base_link: - Value: true + Value: false chassis_link: - Value: true + Value: false front_axle_link: - Value: true + Value: false front_bumblebee: - Value: true + Value: false front_left_wheel_link: - Value: true + Value: false front_right_wheel_link: - Value: true + Value: false front_xb3: - Value: true + Value: false hc: - Value: true + Value: false honeycomb: - Value: true + Value: false imu_link: - Value: true + Value: false left_lidar: - Value: true + Value: false lidar: Value: false loc vertex frame: @@ -106,33 +108,33 @@ Visualization Manager: loc vertex frame (offset): Value: false m600_base: - Value: true + Value: false microstrain: - Value: true + Value: false navsat_link: - Value: true + Value: false odo vertex frame: Value: false odom: - Value: true + Value: false planning frame: Value: false rear_antenna_link: - Value: true + Value: false rear_bumblebee: - Value: true + Value: false rear_left_wheel_link: - Value: true + Value: false rear_right_wheel_link: - Value: true + Value: false rear_sensor_base_link: - Value: true + Value: false rear_sensor_plate_link: - Value: true + Value: false rear_xb3: - Value: true + Value: false right_lidar: - Value: true + Value: false robot: Value: true robot planning: @@ -142,9 +144,9 @@ Visualization Manager: robot planning (extrapolated) mpc: Value: false sensor_anchor_link: - Value: true + Value: false velodyne: - Value: true + Value: false world: Value: false world (offset): @@ -1407,6 +1409,48 @@ Visualization Manager: Use Fixed Frame: true Use rainbow: true Value: false + - Alpha: 1 + Arrow Length: 0.30000001192092896 + Axes Length: 0.30000001192092896 + Axes Radius: 0.009999999776482582 + Class: rviz_default_plugins/PoseArray + Color: 0; 0; 0 + Enabled: true + Head 
Length: 0.15000000596046448 + Head Radius: 0.10000000149011612 + Name: PoseArray + Shaft Length: 0.10000000149011612 + Shaft Radius: 0.05000000074505806 + Shape: Arrow (3D) + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/mpc_ref_pose_array1 + Value: true + - Alpha: 1 + Arrow Length: 0.30000001192092896 + Axes Length: 0.30000001192092896 + Axes Radius: 0.009999999776482582 + Class: rviz_default_plugins/PoseArray + Color: 255; 255; 255 + Enabled: true + Head Length: 0.15000000596046448 + Head Radius: 0.10000000149011612 + Name: PoseArray + Shaft Length: 0.10000000149011612 + Shaft Radius: 0.05000000074505806 + Shape: Arrow (3D) + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /vtr/mpc_ref_pose_array2 + Value: true Enabled: true Global Options: Background Color: 181; 181; 181 @@ -1453,16 +1497,16 @@ Visualization Manager: Views: Current: Class: rviz_default_plugins/Orbit - Distance: 14.629002571105957 + Distance: 21.608963012695312 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: - X: 1.5698918104171753 - Y: -0.01999158412218094 - Z: 1.453581690788269 + X: 0.46887874603271484 + Y: -0.032121725380420685 + Z: -0.3164111375808716 Focal Shape Fixed Size: false Focal Shape Size: 0.05000000074505806 Invert Z Axis: false @@ -1471,7 +1515,7 @@ Visualization Manager: Pitch: 1.5697963237762451 Target Frame: robot Value: Orbit (rviz_default_plugins) - Yaw: 3.288867473602295 + Yaw: 3.1313488483428955 Saved: - Class: rviz_default_plugins/Orbit Distance: 111.39698028564453 From b4d73495e545b0e1f21304d8b7f67bf73ac7cf35 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Tue, 9 May 2023 18:02:46 -0400 Subject: [PATCH 21/25] pre-merge cleanup --- launch/online_honeycomb_grizzly.launch.yaml | 4 ++-- 
main/src/vtr_gui/vtr_gui/socket_server.py | 12 ++++++------ main/src/vtr_navigation/src/navigator.cpp | 5 ++++- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/launch/online_honeycomb_grizzly.launch.yaml b/launch/online_honeycomb_grizzly.launch.yaml index b035b24ef..8976a7d4e 100644 --- a/launch/online_honeycomb_grizzly.launch.yaml +++ b/launch/online_honeycomb_grizzly.launch.yaml @@ -37,7 +37,7 @@ windows: - window_name: rviz2 layout: main-horizontal shell_command_before: - - source /opt/ros/galactic/setup.bash + - source ${VTRSRC}/main/install/setup.bash panes: - - ros2 run rviz2 rviz2 -d ${VTRSRC}/rviz/lidar.rviz + - ros2 run rviz2 rviz2 -d ${VTRSRC}/rviz/honeycomb.rviz # - ros2 run rqt_reconfigure rqt_reconfigure diff --git a/main/src/vtr_gui/vtr_gui/socket_server.py b/main/src/vtr_gui/vtr_gui/socket_server.py index f550908a6..540fd2f60 100755 --- a/main/src/vtr_gui/vtr_gui/socket_server.py +++ b/main/src/vtr_gui/vtr_gui/socket_server.py @@ -128,42 +128,42 @@ def handle_change_env_info(data): def handle_server_state(json): logger.info('Broadcasting server state') server_state = json['server_state'] - socketio.emit(u"mission/server_state", server_state, broadcast=True) + socketio.emit(u"mission/server_state", server_state) @socketio.on('notification/graph_state') def handle_graph_state(json): logger.info('Broadcasting graph state') graph_state = json['graph_state'] - socketio.emit(u"graph/state", graph_state, broadcast=True) + socketio.emit(u"graph/state", graph_state) @socketio.on('notification/graph_update') def handle_graph_update(json): logger.info('Broadcasting graph update') graph_update = json['graph_update'] - socketio.emit(u"graph/update", graph_update, broadcast=True) + socketio.emit(u"graph/update", graph_update) @socketio.on('notification/robot_state') def handle_robot_state(json): logger.info('Broadcasting robot state') robot_state = json['robot_state'] - socketio.emit(u"robot/state", robot_state, broadcast=True) + 
socketio.emit(u"robot/state", robot_state) @socketio.on('notification/following_route') def handle_following_route(json): logger.info('Broadcasting following route') following_route = json['following_route'] - socketio.emit(u"following_route", following_route, broadcast=True) + socketio.emit(u"following_route", following_route) @socketio.on('notification/task_queue_update') def handle_task_queue_update(json): logger.info('Broadcasting task queue update') task_queue_update = json['task_queue_update'] - socketio.emit(u"task_queue/update", task_queue_update, broadcast=True) + socketio.emit(u"task_queue/update", task_queue_update) def main(): diff --git a/main/src/vtr_navigation/src/navigator.cpp b/main/src/vtr_navigation/src/navigator.cpp index a88abe4f6..2299c1391 100644 --- a/main/src/vtr_navigation/src/navigator.cpp +++ b/main/src/vtr_navigation/src/navigator.cpp @@ -138,7 +138,10 @@ Navigator::Navigator(const rclcpp::Node::SharedPtr& node) : node_(node) { // lidar pointcloud data subscription const auto lidar_topic = node_->declare_parameter("lidar_topic", "/points"); // \note lidar point cloud data frequency is low, and we cannot afford dropping data - lidar_sub_ = node_->create_subscription(lidar_topic, rclcpp::SystemDefaultsQoS(), std::bind(&Navigator::lidarCallback, this, std::placeholders::_1), sub_opt); + auto lidar_qos = rclcpp::QoS(10); + lidar_qos.reliable(); + lidar_sub_ = node_->create_subscription(lidar_topic, lidar_qos, std::bind(&Navigator::lidarCallback, this, std::placeholders::_1), sub_opt); + //lidar_sub_ = node_->create_subscription(lidar_topic, rclcpp::SystemDefaultsQoS(), std::bind(&Navigator::lidarCallback, this, std::placeholders::_1), sub_opt); #endif // clang-format on From 541c9a5ba5f40145afadfaedbd0c5b8c67cb8d9c Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Tue, 9 May 2023 19:30:08 -0400 Subject: [PATCH 22/25] pre-merge cleanup --- config/honeycomb_grizzly_default.yaml | 23 ++-- .../src/path_planning/mpc_path_planner2.cpp | 14 +-- 
main/src/vtr_path_planning/src/cbit/cbit.cpp | 31 ++++- .../src/mpc/mpc_path_planner2.cpp | 112 +++++++++++++++--- 4 files changed, 139 insertions(+), 41 deletions(-) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index 89ecc1dfa..b4bb96e7d 100644 --- a/config/honeycomb_grizzly_default.yaml +++ b/config/honeycomb_grizzly_default.yaml @@ -3,16 +3,17 @@ log_to_file: true log_debug: true log_enabled: - - lidar.terrain_assessment - - navigation - - navigation.graph_map_server - - navigation.command + #- lidar.terrain_assessment + #- navigation + #- navigation.graph_map_server + #- navigation.command # tactic - - tactic + #- tactic - tactic.pipeline - - tactic.module - - tactic.module.live_mem_manager - - tactic.module.graph_mem_manager + - lidar.preprocessing + #- tactic.module + #- tactic.module.live_mem_manager + #- tactic.module.graph_mem_manager # path planner #"path_planning", #"path_planning.teb", @@ -22,10 +23,10 @@ #"mpc_debug.cbit", #"obstacle_detection.cbit", # mission planner - - mission.server - - mission.state_machine + #- mission.server + #- mission.state_machine # pose graph - - pose_graph + #- pose_graph # pipeline specific # "lidar.pipeline", # "lidar.honeycomb_converter", diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp index c572fa2d9..c6c25de32 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -198,17 +198,17 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se opt_problem.addCostTerm(kin_cost_term); // Non-Zero Velocity Penalty (penalty of non resting control effort helps with point stabilization) - //const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, velLossFunc); - //opt_problem.addCostTerm(vel_cost_term); + const auto vel_cost_term = 
steam::WeightedLeastSqCostTerm<2>::MakeShared(vel_state_vars[i], sharedVelNoiseModel, velLossFunc); + opt_problem.addCostTerm(vel_cost_term); // Velocity set-point constraint - No longer using this due to complications when repeating a path in reverse // Only add this cost term if we are not in point stabilization mode (end of path) - if (point_stabilization == false) - { - const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, velLossFunc); - opt_problem.addCostTerm(vel_cost_term); - } + //if (point_stabilization == false) + //{ + // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, velLossFunc); + // opt_problem.addCostTerm(vel_cost_term); + //} // Acceleration Constraints diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index b488ca1fc..30a04e637 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -426,7 +426,7 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { CLOG(DEBUG, "mpc_debug.cbit") << "The Inverted Current Robot State Using Direct Robot Values is: " << T0_inv; // End of pose extrapolation - + /* // Calculate which T_ref measurements to used based on the current path solution CLOG(INFO, "mpc.cbit") << "Attempting to generate T_ref measurements"; auto meas_result = GenerateReferenceMeas2(cbit_path_ptr, robot_pose, K, DT, VF); @@ -442,6 +442,29 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { std::vector barrier_q_left = meas_result3.barrier_q_left; std::vector barrier_q_right = meas_result3.barrier_q_right; // END of experimental code + */ + + + + // Calculate which T_ref measurements to used based on the current path solution + CLOG(INFO, "mpc.cbit") << "Attempting to 
generate T_ref measurements"; + auto meas_result = GenerateReferenceMeas2(cbit_path_ptr, robot_pose, K, DT, VF); + auto measurements = meas_result.measurements; + bool point_stabilization = meas_result.point_stabilization; + + std::vector p_interp_vec = meas_result.p_interp_vec; + std::vector q_interp_vec = meas_result.q_interp_vec; + + // Experimental Synchronized Tracking/Teach Reference Poses: + auto meas_result4 = GenerateReferenceMeas4(global_path_ptr, corridor_ptr, robot_pose, K, DT, VF, curr_sid, p_interp_vec); + auto measurements4 = meas_result4.measurements; + bool point_stabilization4 = meas_result4.point_stabilization; + std::vector barrier_q_left = meas_result4.barrier_q_left; + std::vector barrier_q_right = meas_result4.barrier_q_right; + //CLOG(ERROR, "mpc_debug.cbit") << "The New Reference Measurements are: " << measurements4; + + + std::vector ref_pose_vec1; for (int i = 0; i Command { ref_pose_vec1.push_back(measurements[i].inverse()); } std::vector ref_pose_vec2; - for (int i = 0; i Command { { CLOG(INFO, "mpc.cbit") << "Attempting to solve the MPC problem"; // Solve using corridor mpc - auto mpc_result = SolveMPC2(applied_vel, T0, measurements3, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization3, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); + auto mpc_result = SolveMPC2(applied_vel, T0, measurements4, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization, pose_error_weight, vel_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); // Solve using tracking mpc //auto mpc_result = SolveMPC2(applied_vel, T0, measurements, measurements, barrier_q_left, barrier_q_right, K, DT, VF, lat_noise_vect, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, 
point_stabilization3, pose_error_weight, acc_error_weight, kin_error_weight, lat_error_weight); //auto mpc_result = SolveMPC(applied_vel, T0, measurements, K, DT, VF, pose_noise_vect, vel_noise_vect, accel_noise_vect, kin_noise_vect, point_stabilization); // Tracking controller version diff --git a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp index a857a026a..a90a8cf8b 100644 --- a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp @@ -33,6 +33,46 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se3::Transformation T0, std::vector measurements, std::vector measurements_cbit, std::vector barrier_q_left, std::vector barrier_q_right, int K, double DT, double VF, Eigen::Matrix lat_noise_vect, Eigen::Matrix pose_noise_vect, Eigen::Matrix vel_noise_vect, Eigen::Matrix accel_noise_vect, Eigen::Matrix kin_noise_vect, bool point_stabilization, double pose_error_weight, double vel_error_weight, double acc_error_weight, double kin_error_weight, double lat_error_weight) { + // Pre-process the measurements to correct the cbit path yaw values depending on if the robot should be moving in forward or reverse + //Eigen::Matrix T_ROT_YAW; + //T_ROT_YAW << -1, 0, 0, 0, + // 0, -1, 0, 0, + // 0, 0, 1, 0, + // 0, 0, 0, 1; + //lgmath::se3::Transformation T_ROT_YAW2(T_ROT_YAW); + //CLOG(ERROR, "mpc_debug.cbit") << "THE PI ROTATION MATRIX IS: " << T_ROT_YAW2; + + //for (int i = 0; i < measurements_cbit.size(); i++) + //{ + // auto test1 = measurements[i].matrix().block(0, 0, 3, 3); + // auto test2 = measurements_cbit[i].matrix().block<3, 1>(0, 3); + // CLOG(ERROR, "mpc_debug.cbit") << "TEST2 IS: " << test2; + // auto cbit_pose = measurements_cbit[i].matrix().block(0, 0, 3, 3); + // auto teach_pose = measurements[i].matrix().block(0, 0, 3, 3); + // CLOG(ERROR, "mpc_debug.cbit") << "THE CBIT MATRIX IS: " << measurements_cbit[i]; + // 
CLOG(ERROR, "mpc_debug.cbit") << "THE TEACH MATRIX IS: " << measurements[i]; + //measurements_cbit[i] = measurements[i]; + // lgmath::se3::Transformation new_meas_cbit(test1,test2); + // measurements_cbit[i] = new_meas_cbit; + + // CLOG(ERROR, "mpc_debug.cbit") << "THE ROTATED CBIT MATRIX IS: " << measurements_cbit[i]; + //auto rel_pose = teach_pose.transpose() * cbit_pose; + //CLOG(ERROR, "mpc_debug.cbit") << "THE CBIT ROTATION MATRIX IS: " << cbit_pose; + //CLOG(ERROR, "mpc_debug.cbit") << "THE TEACH ROTATION MATRIX IS: " << teach_pose; + //CLOG(ERROR, "mpc_debug.cbit") << "THE RELATIVE ROTATION MATRIX IS: " << teach_pose.transpose() * cbit_pose; + //double relative_yaw = atan2(rel_pose(1,0),rel_pose(0,0)); + //CLOG(ERROR, "mpc_debug.cbit") << "THE RELATIVE YAW IS: " << relative_yaw; + //if (fabs(relative_yaw) >= 1.57075) + //{ + // measurements_cbit[i] = measurements_cbit[i] * T_ROT_YAW2; + // CLOG(ERROR, "mpc_debug.cbit") << "CORRECTED THE YAW"; + //} + //CLOG(ERROR, "mpc_debug.cbit") << "THE ROTATED CBIT MATRIX IS: " << measurements_cbit[i]; + //} + // End of pre-pre-processing measurement rotations + + + // Conduct an MPC Iteration given current configurations // Velocity set-points (desired forward velocity and angular velocity), here we set a static forward target velocity, and try to minimize rotations (0rad/sec) @@ -125,7 +165,7 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Setup the optimization problem steam::OptimizationProblem opt_problem; - for (int i=0; i previous_vel, lgmath::se for (int i = 0; i < K; i++) { // Pose Error - const auto pose_error_func = steam::se3::SE3ErrorEvaluator::MakeShared(pose_state_vars[i], measurements[i]); - const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, poseLossFunc); - opt_problem.addCostTerm(pose_cost_term); + if (i > 0) + { + const auto pose_error_func = steam::se3::SE3ErrorEvaluator::MakeShared(pose_state_vars[i], 
measurements[i]); + const auto pose_cost_term = steam::WeightedLeastSqCostTerm<6>::MakeShared(pose_error_func, sharedPoseNoiseModel, poseLossFunc); + opt_problem.addCostTerm(pose_cost_term); + } // Kinematic constraints (softened but penalized heavily) if (i < (K-1)) @@ -161,12 +204,14 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Velocity set-point constraint - No longer using this due to complications when repeating a path in reverse // Only add this cost term if we are not in point stabilization mode (end of path) + //if (point_stabilization == false) //{ // const auto vel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(steam::vspace::VSpaceErrorEvaluator<2>::MakeShared(vel_state_vars[i],v_ref), sharedVelNoiseModel, velLossFunc); // opt_problem.addCostTerm(vel_cost_term); //} - + + // Acceleration Constraints if (i == 0) { @@ -181,19 +226,19 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se const auto accel_cost_term = steam::WeightedLeastSqCostTerm<2>::MakeShared(accel_diff, sharedAccelNoiseModel, accelLossFunc); opt_problem.addCostTerm(accel_cost_term); } - + } // Laterial Barrier State Constraints - if (i > 0) + if (i >= 0) { // Generate a locked transform evaluator to store the current measurement for state constraints // The reason we make it a variable and lock it is so we can use the built in steam evaluators which require evaluable inputs measurement_vars.push_back(steam::se3::SE3StateVar::MakeShared(measurements[i])); - measurement_vars[i-1]->locked() = true; + measurement_vars[i]->locked() = true; // Take the compose inverse of the locked measurement w.r.t the state transforms - const auto compose_inv = steam::se3::ComposeInverseEvaluator::MakeShared(measurement_vars[i-1], pose_state_vars[i]); + const auto compose_inv = steam::se3::ComposeInverseEvaluator::MakeShared(measurement_vars[i], pose_state_vars[i]); // Use the ComposeLandmarkEvaluator to right multiply the 4th column of the identity 
matrix to create a 4x1 homogenous point vector with lat,lon,alt error components const auto error_vec = steam::stereo::ComposeLandmarkEvaluator::MakeShared(compose_inv, I_4_eval); @@ -240,14 +285,26 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se // Solve the optimization problem with GuassNewton solver //using SolverType = steam::GaussNewtonSolver; // Old solver, does not have back stepping capability using SolverType = steam::LineSearchGaussNewtonSolver; + //using SolverType = steam::DoglegGaussNewtonSolver; // Initialize solver parameters SolverType::Params params; - params.verbose = false; // Makes the output display for debug when true + params.verbose = true; // Makes the output display for debug when true params.relative_cost_change_threshold = 1e-6; - params.max_iterations = 200; + params.max_iterations = 100; params.absolute_cost_change_threshold = 1e-6; - params.backtrack_multiplier = 0.5; // Line Search Specifc Params, will fail to build if using GaussNewtonSolver - params.max_backtrack_steps = 1000; // Line Search Specifc Params, will fail to build if using GaussNewtonSolver + params.backtrack_multiplier = 0.5; // Line Search Specific Params, will fail to build if using GaussNewtonSolver + params.max_backtrack_steps = 200; // Line Search Specific Params, will fail to build if using GaussNewtonSolver + //params.ratio_threshold_shrink = 0.1; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + /// Grow trust region if ratio of actual to predicted cost reduction above + /// this (range: 0.0-1.0) + //params.ratio_threshold_grow = 0.9; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + /// Amount to shrink by (range: <1.0) + //params.shrink_coeff = 0.9; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + /// Amount to grow by (range: >1.0) + //params.grow_coeff = 1.1; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + /// Maximum number of times 
to shrink trust region before giving up + //params.max_shrink_steps = 200; // Dogleg Specific Params, will fail to build if using GaussNewtonSolver + SolverType solver(opt_problem, params); // Solve the optimization problem @@ -329,7 +386,6 @@ struct meas_result GenerateReferenceMeas2(std::shared_ptr> cbi // Save a copy of the current path solution to work on auto cbit_path = *cbit_path_ptr; - // PSEUDO CODE: // 1. Find the closest point on the cbit path to the current state of the robot // 2. Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) @@ -503,6 +559,8 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path p_robot = 0.0; } + CLOG(WARNING, "mpc_debug.cbit") << "The p value for the robot is: " << p_robot + sid_p; + // PSEUDO CODE: // 1. Find the closest point on the cbit path to the current state of the robot // 2. Using K, DT, VF, we generate a vector of "p" values that we want to create Euclidean measurements for (we know these up front) @@ -556,8 +614,6 @@ struct meas_result3 GenerateReferenceMeas3(std::shared_ptr global_path { p_meas_vec.push_back((i * DT * VF) + p_robot); } - CLOG(ERROR, "mpc.cbit") << "The Approximate Robot State P value is (teach meas): " << p_robot; - CLOG(ERROR, "mpc_debug.cbit") << "p_meas_vec is (teach path): " << p_meas_vec; // Iterate through the p_measurements and interpolate euclidean measurements from the cbit_path and the corresponding cbit_p values // Note this could probably be combined in the previous loop too @@ -617,16 +673,19 @@ struct meas_result3 GenerateReferenceMeas4(std::shared_ptr global_path // load the teach path std::vector teach_path = global_path_ptr->disc_path; + std::vector teach_path_p = global_path_ptr->p; std::vector measurements; + //CLOG(ERROR, "mpc_debug.cbit") << "The p_interp_vec is: " << p_interp_vec; + //CLOG(ERROR, "mpc_debug.cbit") << "The Global Path p is: " << teach_path_p; // Iterate through the 
interpolated p_measurements and make interpolate euclidean measurements from the teach path for (int i = 0; i < p_interp_vec.size(); i++) { - struct interp_result meas = InterpolateMeas2(p_interp_vec[i], p_interp_vec, teach_path); + struct interp_result meas = InterpolateMeas2(p_interp_vec[i], teach_path_p, teach_path); //CLOG(WARNING, "corridor_mpc_debug.cbit") << "Adding Measurement: " << meas; // add to measurement vector @@ -680,9 +739,24 @@ struct interp_result InterpolateMeas2(double p_val, std::vector cbit_p, // Derive the yaw by creating the vector connecting the pose_upp and pose_lower pts // TODO: There is a problem here for reverse planning, will need to rotate the yaw 180 degrees in that case. // For normal forward planning this is fine though - double yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); - //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw Is: " << yaw_int; + // This interpolation if we do have yaw available (when the input path is the teach path as it is for corridor mpc) + double yaw_int; + //yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); + if ((pose_lower.yaw == 0.0) && (pose_upper.yaw == 0.0)) + { + // Yaw interpolation when we dont have yaw available explicitly (i.e from cbit path euclid conversion) + yaw_int = std::atan2((pose_upper.y - pose_lower.y), (pose_upper.x - pose_lower.x)); + //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw For CBIT PATH is: " << yaw_int; + } + else + { + // if this is used on the cbit path, it will just return 0.0 every time (note this will break tracking control if yaw covariance set low) + //yaw_int = pose_lower.yaw + ((p_val - p_lower) / (p_upper - p_lower)) * (pose_upper.yaw - pose_lower.yaw); + yaw_int = pose_lower.yaw; + //CLOG(ERROR, "mpc_debug.cbit") << "The Yaw For TEACH PATH is: " << yaw_int; + } + // we also want to interpolate p and q values based on the original p,q from the cbit_path. 
We use this afterwards for finding appropriate corridor mpc // reference poses on the teach path From 7ccf902b62e46eb99c554aae06b19ccf35fd28a8 Mon Sep 17 00:00:00 2001 From: a-krawciw Date: Tue, 9 May 2023 21:01:39 -0400 Subject: [PATCH 23/25] Address xacro, radar and tactic comments --- .../modules/odometry/odometry_icp_module.cpp | 6 +++--- main/src/vtr_tactic/src/tactic.cpp | 17 +++++------------ .../urdf/accessories/honeycomb.urdf.xacro | 13 ------------- 3 files changed, 8 insertions(+), 28 deletions(-) delete mode 100644 robots/src/utias_grizzly_description/urdf/accessories/honeycomb.urdf.xacro diff --git a/main/src/vtr_radar/src/modules/odometry/odometry_icp_module.cpp b/main/src/vtr_radar/src/modules/odometry/odometry_icp_module.cpp index d43dac769..1dc238cc0 100644 --- a/main/src/vtr_radar/src/modules/odometry/odometry_icp_module.cpp +++ b/main/src/vtr_radar/src/modules/odometry/odometry_icp_module.cpp @@ -128,7 +128,7 @@ void OdometryICPModule::run_(QueryCache &qdata0, OutputCache &, int first_steps = config_->first_num_steps; int max_it = config_->initial_max_iter; float max_pair_d = config_->initial_max_pairing_dist; - // float max_planar_d = config_->initial_max_planar_dist; + float max_planar_d = config_->initial_max_planar_dist; float max_pair_d2 = max_pair_d * max_pair_d; KDTreeSearchParams search_params; @@ -195,7 +195,7 @@ void OdometryICPModule::run_(QueryCache &qdata0, OutputCache &, /// Eigen matrix of original data (only shallow copy of ref clouds) const auto map_mat = point_map.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); - // const auto map_normals_mat = point_map.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::normal_offset()); + const auto map_normals_mat = point_map.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::normal_offset()); const auto query_mat = query_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); const auto query_norms_mat = 
query_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::normal_offset()); auto aligned_mat = aligned_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); @@ -466,7 +466,7 @@ void OdometryICPModule::run_(QueryCache &qdata0, OutputCache &, // reduce the max distance max_pair_d = config_->refined_max_pairing_dist; max_pair_d2 = max_pair_d * max_pair_d; - // max_planar_d = config_->refined_max_planar_dist; + max_planar_d = config_->refined_max_planar_dist; } } timer[5]->stop(); diff --git a/main/src/vtr_tactic/src/tactic.cpp b/main/src/vtr_tactic/src/tactic.cpp index 5b0f1b66c..7160b58ed 100644 --- a/main/src/vtr_tactic/src/tactic.cpp +++ b/main/src/vtr_tactic/src/tactic.cpp @@ -164,22 +164,15 @@ bool Tactic::passedSeqId(const uint64_t& sid) const { bool Tactic::routeCompleted() const { auto lock = chain_->guard(); const auto translation = chain_->T_leaf_trunk().r_ab_inb().norm(); - //const auto test = (chain_->pose(chain_->size()-1)).r_ab_inb().norm(); - //CLOG(ERROR, "tactic") << "My test pose error is: " << test; - //if (test < config_->route_completion_translation_threshold) // experimental jordy modification - //{ - // CLOG(ERROR, "tactic") << "Route Completetion Threshold is: " << translation; - // CLOG(ERROR, "tactic") << "My test pose error is: " << test; - // return true; - //} - if (chain_->trunkSequenceId() < (chain_->sequence().size() - 1)) - { + + if (chain_->trunkSequenceId() < (chain_->sequence().size() - 1)) { return false; } - if (translation > config_->route_completion_translation_threshold) - { + + if (translation > config_->route_completion_translation_threshold) { return false; } + return true; } diff --git a/robots/src/utias_grizzly_description/urdf/accessories/honeycomb.urdf.xacro b/robots/src/utias_grizzly_description/urdf/accessories/honeycomb.urdf.xacro deleted file mode 100644 index 0ba4c8c3b..000000000 --- a/robots/src/utias_grizzly_description/urdf/accessories/honeycomb.urdf.xacro +++ /dev/null @@ 
-1,13 +0,0 @@ - - - - - - - - - - - From 6e0d7f351ca5004917c103721622a92f8b0ba984 Mon Sep 17 00:00:00 2001 From: a-krawciw Date: Wed, 10 May 2023 08:47:52 -0400 Subject: [PATCH 24/25] Make matplotlib_cpp a proper dependency --- Dockerfile | 5 ++ main/src/vtr_lidar/CMakeLists.txt | 30 +--------- main/src/vtr_navigation/CMakeLists.txt | 32 +--------- main/src/vtr_path_planning/CMakeLists.txt | 59 ++++++------------- .../vtr_path_planning/cbit/cbit_plotting.hpp | 1 + .../src/cbit/cbit_plotting.cpp | 6 +- 6 files changed, 31 insertions(+), 102 deletions(-) diff --git a/Dockerfile b/Dockerfile index ac0c68a5c..6bba64607 100644 --- a/Dockerfile +++ b/Dockerfile @@ -94,6 +94,11 @@ RUN pip3 install \ python-socketio[client] \ websocket-client +RUN mkdir -p ${HOMEDIR}/.matplotcpp && cd ${HOMEDIR}/.matplotcpp \ + && git clone https://github.com/lava/matplotlib-cpp.git . \ + && mkdir -p ${HOMEDIR}/.matplotcpp/build && cd ${HOMEDIR}/.matplotcpp/build \ + && cmake .. && cmake --build . -j${NUMPROC} --target install + RUN apt install htop # Install vim diff --git a/main/src/vtr_lidar/CMakeLists.txt b/main/src/vtr_lidar/CMakeLists.txt index befb57ad1..22761d6e0 100644 --- a/main/src/vtr_lidar/CMakeLists.txt +++ b/main/src/vtr_lidar/CMakeLists.txt @@ -68,36 +68,8 @@ add_library(${PROJECT_NAME}_path_planning ${PATH_PLANNING_SRC}) target_link_libraries(${PROJECT_NAME}_path_planning ${PROJECT_NAME}_components) ament_target_dependencies(${PROJECT_NAME}_path_planning vtr_logging vtr_path_planning vtr_lidar_msgs - lgmath steam -) - -# START OF JORDY EXPERIMENTAL MATPLOTLIB CHANGES -# find python libraries -find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) -find_package(PythonLibs 3.0 REQUIRED) -include_directories(${PYTHON3_INCLUDE_DIRS} ${NumPy_INCLUDE_DIRS}) - -# populate matplotlib repository -include(FetchContent) -FetchContent_Declare( - matplotlib - GIT_REPOSITORY https://github.com/lava/matplotlib-cpp.git - GIT_TAG f23347fca25219d1c42cbb91608b5556814bf572 + 
lgmath steam vtr_path_planning ) -FetchContent_GetProperties(matplotlib) -if(NOT matplotlib_POPULATED) - FetchContent_Populate(matplotlib) -endif() -include_directories(SYSTEM ${matplotlib_SOURCE_DIR}) - -target_link_libraries(${PROJECT_NAME}_path_planning ${PYTHON_LIBRARIES}) - - -find_package(PythonLibs REQUIRED) -include_directories(${PYTHON_INCLUDE_DIRS}) -target_link_libraries(${PROJECT_NAME}_path_planning ${PYTHON_LIBRARIES}) -# END OF JORDY EXPERIMENTAL MATPLOTLIBCPP Changes FOR PLOTTING - # additional tools for experiments file(GLOB_RECURSE TOOLS_SRC diff --git a/main/src/vtr_navigation/CMakeLists.txt b/main/src/vtr_navigation/CMakeLists.txt index adf45363d..23503017f 100644 --- a/main/src/vtr_navigation/CMakeLists.txt +++ b/main/src/vtr_navigation/CMakeLists.txt @@ -19,6 +19,7 @@ find_package(vtr_common REQUIRED) find_package(vtr_logging REQUIRED) find_package(vtr_pose_graph REQUIRED) find_package(vtr_tactic REQUIRED) +find_package(vtr_path_planning REQUIRED) find_package(vtr_mission_planning REQUIRED) find_package(vtr_navigation_msgs REQUIRED) @@ -67,7 +68,7 @@ add_library(${PROJECT_NAME}_navigator ${SRC}) ament_target_dependencies(${PROJECT_NAME}_navigator rclcpp tf2 tf2_ros sensor_msgs # for visualization only - vtr_common vtr_logging vtr_pose_graph vtr_tactic vtr_mission_planning + vtr_common vtr_logging vtr_pose_graph vtr_tactic vtr_mission_planning vtr_path_planning vtr_lidar # vtr_vision ) @@ -86,7 +87,7 @@ ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET) ament_export_dependencies( rclcpp tf2 tf2_ros vtr_common vtr_logging vtr_pose_graph vtr_tactic vtr_mission_planning - vtr_navigation_msgs + vtr_navigation_msgs vtr_path_planning vtr_lidar # vtr_vision PROJ @@ -104,33 +105,6 @@ target_include_directories(${PROJECT_NAME} $) -# START OF JORDY EXPERIMENTAL MATPLOTLIB CHANGES -# find python libraries -find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) -find_package(PythonLibs 3.0 REQUIRED) 
-include_directories(${PYTHON3_INCLUDE_DIRS} ${NumPy_INCLUDE_DIRS}) - -# populate matplotlib repository -include(FetchContent) -FetchContent_Declare( - matplotlib - GIT_REPOSITORY https://github.com/lava/matplotlib-cpp.git - GIT_TAG f23347fca25219d1c42cbb91608b5556814bf572 -) -FetchContent_GetProperties(matplotlib) -if(NOT matplotlib_POPULATED) - FetchContent_Populate(matplotlib) -endif() -include_directories(SYSTEM ${matplotlib_SOURCE_DIR}) - -target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES}) - - -find_package(PythonLibs REQUIRED) -include_directories(${PYTHON_INCLUDE_DIRS}) -target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES}) -# END OF JORDY EXPERIMENTAL MATPLOTLIBCPP Changes FOR PLOTTING - install( DIRECTORY include/ DESTINATION include diff --git a/main/src/vtr_path_planning/CMakeLists.txt b/main/src/vtr_path_planning/CMakeLists.txt index b89bdd948..aada0e75c 100644 --- a/main/src/vtr_path_planning/CMakeLists.txt +++ b/main/src/vtr_path_planning/CMakeLists.txt @@ -21,11 +21,18 @@ find_package(vtr_common REQUIRED) find_package(vtr_logging REQUIRED) find_package(vtr_tactic REQUIRED) + +# # find python libraries +find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) +find_package(PythonLibs 3.0 REQUIRED) + # C++ Libraries include_directories( PUBLIC $ $ + ${PYTHON3_INCLUDE_DIRS} + ${NumPy_INCLUDE_DIRS} ) file(GLOB_RECURSE SRC src/base_path_planner.cpp) @@ -40,59 +47,29 @@ add_library(${PROJECT_NAME}_mpc ${SRC}) target_link_libraries(${PROJECT_NAME}_mpc ${PROJECT_NAME}_base) ament_target_dependencies(${PROJECT_NAME}_mpc rclcpp tf2 tf2_ros tf2_eigen - lgmath steam -) - -file(GLOB_RECURSE SRC src/cbit/*.cpp) -add_library(${PROJECT_NAME}_cbit ${SRC}) -target_link_libraries(${PROJECT_NAME}_cbit ${PROJECT_NAME}_base) -ament_target_dependencies(${PROJECT_NAME}_cbit - rclcpp tf2 tf2_ros tf2_eigen - lgmath steam + lgmath steam Python3 ) -# Hacky way of declaring the helper scripts -file(GLOB_RECURSE SRC src/cbit/generate_pq.cpp) 
-file(GLOB_RECURSE SRC src/cbit/utils.cpp) -file(GLOB_RECURSE SRC src/cbit/cbit_path_planner.cpp) -# START OF JORDY EXPERIMENTAL MATPLOTLIB CHANGES (note also had to make similar changes in vtr_lidar and vtr_navigation cmakelist.txt also for some dumb reason) -# Declare the plotting tools -file(GLOB_RECURSE SRC src/cbit/cbit_plotting.cpp) +find_package(matplotlib_cpp REQUIRED) +include_directories(SYSTEM ${matplotlib_cpp_SOURCE_DIR}) -# find python libraries -find_package(Python3 COMPONENTS Interpreter Development NumPy REQUIRED) -find_package(PythonLibs 3.0 REQUIRED) -include_directories(${PYTHON3_INCLUDE_DIRS} ${NumPy_INCLUDE_DIRS}) - -# populate matplotlib repository -include(FetchContent) -FetchContent_Declare( - matplotlib - GIT_REPOSITORY https://github.com/lava/matplotlib-cpp.git - GIT_TAG f23347fca25219d1c42cbb91608b5556814bf572 -) -FetchContent_GetProperties(matplotlib) -if(NOT matplotlib_POPULATED) - FetchContent_Populate(matplotlib) -endif() -include_directories(SYSTEM ${matplotlib_SOURCE_DIR}) +file(GLOB_RECURSE SRC src/cbit/*.cpp) +add_library(${PROJECT_NAME}_cbit ${SRC}) +target_link_libraries(${PROJECT_NAME}_cbit ${PROJECT_NAME}_base) target_link_libraries(${PROJECT_NAME}_cbit ${PYTHON_LIBRARIES}) - -find_package(PythonLibs REQUIRED) -include_directories(${PYTHON_INCLUDE_DIRS}) -target_link_libraries(${PROJECT_NAME}_cbit ${PYTHON_LIBRARIES}) -# EXPERIMENTAL, JORDY ADDITION FOR MATPLOTLIBCPP PLOTTING -#target_link_libraries($vtr_path_planning ${PYTHON_LIBRARIES} Python3::NumPy) -# END OF EXPERIMENTAL MATPLOTLIBCPP +ament_target_dependencies(${PROJECT_NAME}_cbit + rclcpp tf2 tf2_ros tf2_eigen Python3 + lgmath steam PythonLibs matplotlib_cpp +) ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET) ament_export_dependencies( - rclcpp tf2 tf2_ros tf2_eigen geometry_msgs + rclcpp tf2 tf2_ros tf2_eigen geometry_msgs matplotlib_cpp vtr_common vtr_logging vtr_tactic ) diff --git 
a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_plotting.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_plotting.hpp index 83974a017..449981205 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_plotting.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_plotting.hpp @@ -21,6 +21,7 @@ // This library is not meant to be included in the main vt&r branch, debug only #include +#include #include "matplotlibcpp.h" // experimental plotting #include "vtr_path_planning/cbit/utils.hpp" diff --git a/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp b/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp index 413489b3f..eb4f3c668 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_plotting.cpp @@ -28,7 +28,7 @@ void plot_tree(Tree tree, Node robot_pq, std::vector path_p, std::vector matplotlibcpp::clf(); // Configure plotting keyword settings - std::unordered_map keywords_robot; + std::map keywords_robot; keywords_robot["color"] = "lime"; @@ -79,7 +79,7 @@ void plot_tree(Tree tree, Node robot_pq, std::vector path_p, std::vector */ // Plot the current robot state - matplotlibcpp::named_plot("Robot State", {-1*robot_pq.q}, std::vector {robot_pq.p}, ".g"); + matplotlibcpp::named_plot("Robot State", std::vector {-1*robot_pq.q}, std::vector {robot_pq.p}, ".g"); matplotlibcpp::scatter(std::vector {-1*robot_pq.q}, std::vector {robot_pq.p}, 200.0, keywords_robot); // try to change colour, need to implement @@ -105,7 +105,7 @@ void plot_tree(Tree tree, Node robot_pq, std::vector path_p, std::vector void plot_robot(Node robot_pq) { // Configure plotting keyword settings - std::unordered_map keywords_robot; + std::map keywords_robot; keywords_robot["color"] = "lime"; // Plot the current robot state From 9cd27a1d0d4cef85bd961f72213c398a3a7233f5 Mon Sep 17 00:00:00 2001 From: Jordy Sehn Date: Wed, 10 May 2023 10:50:42 -0400 Subject: [PATCH 
25/25] pull request comment revisions --- config/honeycomb_grizzly_default.yaml | 52 +- .../include/vtr_lidar/modules/modules.hpp | 3 - .../planning/change_detection_module.hpp | 99 ---- .../planning/change_detection_module_v2.hpp | 109 ---- .../modules/planning/fake_obstacle_module.hpp | 82 --- .../localization/localization_icp_module.cpp | 1 - .../planning/change_detection_module.cpp | 407 --------------- .../planning/change_detection_module_v2.cpp | 476 ------------------ .../modules/planning/fake_obstacle_module.cpp | 131 ----- .../preprocessing/preprocessing_module_v2.cpp | 2 +- main/src/vtr_lidar/src/path_planning/cbit.cpp | 15 +- .../src/path_planning/mpc_path_planner2.cpp | 6 +- .../include/vtr_path_planning/cbit/cbit.hpp | 2 + .../vtr_path_planning/cbit/cbit_config.hpp | 3 + .../cbit/cbit_path_planner.hpp | 16 +- ...ators.hpp => lateral_error_evaluators.hpp} | 32 +- main/src/vtr_path_planning/src/cbit/cbit.cpp | 13 +- .../src/cbit/cbit_path_planner.cpp | 23 +- .../src/mpc/lateral_error_evaluator.cpp | 52 +- .../src/mpc/mpc_path_planner2.cpp | 6 +- 20 files changed, 102 insertions(+), 1428 deletions(-) delete mode 100644 main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module.hpp delete mode 100644 main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v2.hpp delete mode 100644 main/src/vtr_lidar/include/vtr_lidar/modules/planning/fake_obstacle_module.hpp delete mode 100644 main/src/vtr_lidar/src/modules/planning/change_detection_module.cpp delete mode 100644 main/src/vtr_lidar/src/modules/planning/change_detection_module_v2.cpp delete mode 100644 main/src/vtr_lidar/src/modules/planning/fake_obstacle_module.cpp rename main/src/vtr_path_planning/include/vtr_path_planning/mpc/{custom_steam_evaluators.hpp => lateral_error_evaluators.hpp} (70%) diff --git a/config/honeycomb_grizzly_default.yaml b/config/honeycomb_grizzly_default.yaml index b4bb96e7d..14be8fced 100644 --- a/config/honeycomb_grizzly_default.yaml +++ 
b/config/honeycomb_grizzly_default.yaml @@ -39,7 +39,6 @@ # "lidar.intra_exp_merging", # "lidar.dynamic_detection", # "lidar.inter_exp_merging", - # "lidar.change_detection", # "lidar.ground_extraction", # "lidar.obstacle_detection", # "lidar.terrain_assessment", @@ -250,25 +249,6 @@ size_x: 16.0 size_y: 8.0 visualize: true - change_detection: - type: lidar.change_detection_v2 - detection_range: 8 - search_radius: 0.25 - negprob_threshold: 0.1 - use_prior: true - alpha0: 3.0 - beta0: 0.03 - use_support_filtering: true - support_radius: 0.25 - support_variance: 0.05 - support_threshold: 2.5 - resolution: 0.5 - size_x: 16.0 - size_y: 8.0 - run_online: false - run_async: true - visualize: false - save_module_result: false memory: type: graph_mem_manager vertex_life_span: 5 @@ -367,21 +347,10 @@ forward_vel: 1.1 max_lin_vel: 1.5 max_ang_vel: 1.5 - vehicle_model: "unicycle" - - # Cost Function Covariance Matrices - #pose_error_cov: [0.75, 0.75, 0.75, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 - #vel_error_cov: [1.0, 10.0] # was [0.1 2.0] # No longer using this cost term - #acc_error_cov: [1.0, 1.0] - #kin_error_cov: [0.001, 0.001, 0.001, 0.001, 0.001, 0.001] - #lat_error_cov: [100.0] + robot_linear_velocity_scale: 1.1 # Used to scale the output twist linear velocity messages by some constant factor to compensate internal low level control miscalibration + robot_angular_velocity_scale: 1.0 # Used to scale the output twist angular velocity messages by some constant factor to compensate internal low level control miscalibration - # Cost Function Weights - #pose_error_weight: 1.0 - #acc_error_weight: 1.0 - #kin_error_weight: 1.0 - #lat_error_weight: 0.01 - + vehicle_model: "unicycle" # Cost Function Covariance Matrices pose_error_cov: [30.0, 30.0, 300.0, 300.0, 300.0, 6.50] @@ -397,21 +366,6 @@ kin_error_weight: 1000.0 lat_error_weight: 0.01 - - # Cost Function Covariance Matrices - #pose_error_cov: 
[2.0, 2.0, 2.0, 2.0, 2.0, 2.0] - #vel_error_cov: [10.0, 10.0] # was [0.1 2.0] # No longer using this cost term - #acc_error_cov: [2.0, 2.0] - #kin_error_cov: [0.05, 0.05, 0.05, 0.05, 0.05, 0.05] - #lat_error_cov: [10.0] - - # Cost Function Weights - #pose_error_weight: 5.0 - #vel_error_weight: 2.0 - #acc_error_weight: 1.0 - #kin_error_weight: 200.0 - #lat_error_weight: 0.5 - #backup for tracking mpc #pose_error_cov: [0.5, 0.5, 0.5, 10.0, 10.0, 10.0] # Used to have yaw components set to 1000 but it seems to cause some instability, others set to 1.0 #vel_error_cov: [0.1, 1.0] # was [0.1 2.0] diff --git a/main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp b/main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp index 9368fa3a7..7b1bd0506 100644 --- a/main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp +++ b/main/src/vtr_lidar/include/vtr_lidar/modules/modules.hpp @@ -41,12 +41,9 @@ #include "vtr_lidar/modules/pointmap/intra_exp_merging_module.hpp" #include "vtr_lidar/modules/pointmap/intra_exp_merging_module_v2.hpp" -#include "vtr_lidar/modules/planning/change_detection_module.hpp" -#include "vtr_lidar/modules/planning/change_detection_module_v2.hpp" #include "vtr_lidar/modules/planning/change_detection_module_v3.hpp" #include "vtr_lidar/modules/planning/ground_extraction_module.hpp" #include "vtr_lidar/modules/planning/obstacle_detection_module.hpp" #include "vtr_lidar/modules/planning/safe_corridor_module.hpp" #include "vtr_lidar/modules/planning/terrain_assessment_module.hpp" -#include "vtr_lidar/modules/planning/fake_obstacle_module.hpp" diff --git a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module.hpp b/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module.hpp deleted file mode 100644 index 79ce72e7f..000000000 --- a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module.hpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2021, Autonomous Space Robotics Lab (ASRL) -// -// 
Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * \file change_detection_module.hpp - * \author Yuchen Wu, Autonomous Space Robotics Lab (ASRL) - */ -#pragma once - -#include "tf2/convert.h" -#include "tf2_eigen/tf2_eigen.hpp" -#include "tf2_ros/transform_broadcaster.h" - -#include "nav_msgs/msg/occupancy_grid.hpp" - -#include "vtr_lidar/cache.hpp" -#include "vtr_tactic/modules/base_module.hpp" -#include "vtr_tactic/task_queue.hpp" - -namespace vtr { -namespace lidar { - -class ChangeDetectionModule : public tactic::BaseModule { - public: - PTR_TYPEDEFS(ChangeDetectionModule); - using PointCloudMsg = sensor_msgs::msg::PointCloud2; - using OccupancyGridMsg = nav_msgs::msg::OccupancyGrid; - - static constexpr auto static_name = "lidar.change_detection"; - - /** \brief Collection of config parameters */ - struct Config : public BaseModule::Config { - PTR_TYPEDEFS(Config); - - // change detection - float detection_range = 10.0; - float search_radius = 1.0; - - // cost map - float resolution = 1.0; - float size_x = 20.0; - float size_y = 20.0; - - // - bool run_online = false; - bool run_async = false; - bool visualize = false; - bool save_module_result = false; - - static ConstPtr fromROS(const rclcpp::Node::SharedPtr &node, - const std::string ¶m_prefix); - }; - - ChangeDetectionModule( - const Config::ConstPtr &config, - const std::shared_ptr &module_factory = nullptr, - const std::string &name = static_name) - : tactic::BaseModule{module_factory, name}, 
config_(config) {} - - private: - void run_(tactic::QueryCache &qdata, tactic::OutputCache &output, - const tactic::Graph::Ptr &graph, - const tactic::TaskExecutor::Ptr &executor) override; - - void runAsync_(tactic::QueryCache &qdata, tactic::OutputCache &output, - const tactic::Graph::Ptr &graph, - const tactic::TaskExecutor::Ptr &executor, - const tactic::Task::Priority &priority, - const tactic::Task::DepId &dep_id) override; - - Config::ConstPtr config_; - - /** \brief mutex to make publisher thread safe */ - std::mutex mutex_; - - /** \brief for visualization only */ - bool publisher_initialized_ = false; - std::shared_ptr tf_bc_; - rclcpp::Publisher::SharedPtr scan_pub_; - rclcpp::Publisher::SharedPtr map_pub_; - rclcpp::Publisher::SharedPtr costmap_pub_; - rclcpp::Publisher::SharedPtr pointcloud_pub_; - - VTR_REGISTER_MODULE_DEC_TYPE(ChangeDetectionModule); -}; - -} // namespace lidar -} // namespace vtr diff --git a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v2.hpp b/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v2.hpp deleted file mode 100644 index afdaf87f7..000000000 --- a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/change_detection_module_v2.hpp +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2021, Autonomous Space Robotics Lab (ASRL) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * \file change_detection_module_v2.hpp - * \author Yuchen Wu, Autonomous Space Robotics Lab (ASRL) - */ -#pragma once - -#include "tf2/convert.h" -#include "tf2_eigen/tf2_eigen.hpp" -#include "tf2_ros/transform_broadcaster.h" - -#include "nav_msgs/msg/occupancy_grid.hpp" - -#include "vtr_lidar/cache.hpp" -#include "vtr_tactic/modules/base_module.hpp" -#include "vtr_tactic/task_queue.hpp" - -namespace vtr { -namespace lidar { - -class ChangeDetectionModuleV2 : public tactic::BaseModule { - public: - PTR_TYPEDEFS(ChangeDetectionModuleV2); - using PointCloudMsg = sensor_msgs::msg::PointCloud2; - using OccupancyGridMsg = nav_msgs::msg::OccupancyGrid; - - static constexpr auto static_name = "lidar.change_detection_v2"; - - /** \brief Collection of config parameters */ - struct Config : public BaseModule::Config { - PTR_TYPEDEFS(Config); - - // change detection - float detection_range = 10.0; - float search_radius = 1.0; - float negprob_threshold = 1.0; - - bool use_prior = false; - float alpha0 = 1.0; - float beta0 = 0.1; - - bool use_support_filtering = false; - float support_radius = 0.25; - float support_variance = 0.1; - float support_threshold = 0.0; - - // cost map - float resolution = 1.0; - float size_x = 20.0; - float size_y = 20.0; - - // - bool run_online = false; - bool run_async = false; - bool visualize = false; - bool save_module_result = false; - - static ConstPtr fromROS(const rclcpp::Node::SharedPtr &node, - const std::string ¶m_prefix); - }; - - ChangeDetectionModuleV2( - const Config::ConstPtr &config, - const std::shared_ptr &module_factory = nullptr, - const std::string &name = static_name) - : tactic::BaseModule{module_factory, name}, config_(config) {} - - private: - void run_(tactic::QueryCache &qdata, tactic::OutputCache &output, - const tactic::Graph::Ptr &graph, - const tactic::TaskExecutor::Ptr &executor) override; - - void runAsync_(tactic::QueryCache &qdata, tactic::OutputCache &output, - const tactic::Graph::Ptr &graph, - const 
tactic::TaskExecutor::Ptr &executor, - const tactic::Task::Priority &priority, - const tactic::Task::DepId &dep_id) override; - - Config::ConstPtr config_; - - /** \brief mutex to make publisher thread safe */ - std::mutex mutex_; - - /** \brief for visualization only */ - bool publisher_initialized_ = false; - std::shared_ptr tf_bc_; - rclcpp::Publisher::SharedPtr scan_pub_; - rclcpp::Publisher::SharedPtr map_pub_; - rclcpp::Publisher::SharedPtr costmap_pub_; - rclcpp::Publisher::SharedPtr costpcd_pub_; - - VTR_REGISTER_MODULE_DEC_TYPE(ChangeDetectionModuleV2); -}; - -} // namespace lidar -} // namespace vtr diff --git a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/fake_obstacle_module.hpp b/main/src/vtr_lidar/include/vtr_lidar/modules/planning/fake_obstacle_module.hpp deleted file mode 100644 index 971d9a84c..000000000 --- a/main/src/vtr_lidar/include/vtr_lidar/modules/planning/fake_obstacle_module.hpp +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2021, Autonomous Space Robotics Lab (ASRL) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * \file fake_obstacle_module.hpp - * \author Yuchen Wu, Autonomous Space Robotics Lab (ASRL) - */ -#pragma once - -#include "vtr_lidar/cache.hpp" -#include "vtr_tactic/modules/base_module.hpp" -#include "vtr_tactic/task_queue.hpp" - -#include "vtr_lidar/mesh2pcd/mesh2pcd.hpp" - -namespace vtr { -namespace lidar { - -class FakeObstacleModule : public tactic::BaseModule { - public: - PTR_TYPEDEFS(FakeObstacleModule); - using PointCloudMsg = sensor_msgs::msg::PointCloud2; - - static constexpr auto static_name = "lidar.fake_obstacle"; - - /** \brief Collection of config parameters */ - struct Config : public BaseModule::Config { - PTR_TYPEDEFS(Config); - - std::string path; - std::vector objs; - - std::vector fixed_types; - std::vector fixed_xs; - std::vector fixed_ys; - std::vector fixed_zs; - std::vector fixed_rolls; - std::vector fixed_pitchs; - std::vector fixed_yaws; - - // general - bool visualize = false; - - static ConstPtr fromROS(const rclcpp::Node::SharedPtr &node, - const std::string ¶m_prefix); - }; - - FakeObstacleModule( - const Config::ConstPtr &config, - const std::shared_ptr &module_factory = nullptr, - const std::string &name = static_name); - - private: - void run_(tactic::QueryCache &qdata, tactic::OutputCache &output, - const tactic::Graph::Ptr &graph, - const tactic::TaskExecutor::Ptr &executor) override; - - Config::ConstPtr config_; - - std::vector> obj_T_scan_objs_; - std::vector converters_; - - /** \brief for visualization only */ - bool publisher_initialized_ = false; - rclcpp::Publisher::SharedPtr pointcloud_pub_; - - VTR_REGISTER_MODULE_DEC_TYPE(FakeObstacleModule); -}; - -} // namespace lidar -} // namespace vtr \ No newline at end of file diff --git a/main/src/vtr_lidar/src/modules/localization/localization_icp_module.cpp b/main/src/vtr_lidar/src/modules/localization/localization_icp_module.cpp index 4c7fc1f8f..212913c24 100644 --- a/main/src/vtr_lidar/src/modules/localization/localization_icp_module.cpp +++ 
b/main/src/vtr_lidar/src/modules/localization/localization_icp_module.cpp @@ -329,7 +329,6 @@ void LocalizationICPModule::run_(QueryCache &qdata0, OutputCache &, *qdata.T_r_v_loc = T_r_v_icp; // set success *qdata.loc_success = true; - CLOG(WARNING, "lidar.localization_icp") << "The resulting transform is:" << T_r_v_icp; } else { CLOG(WARNING, "lidar.localization_icp") << "Matched points ratio " << matched_points_ratio diff --git a/main/src/vtr_lidar/src/modules/planning/change_detection_module.cpp b/main/src/vtr_lidar/src/modules/planning/change_detection_module.cpp deleted file mode 100644 index ee291891d..000000000 --- a/main/src/vtr_lidar/src/modules/planning/change_detection_module.cpp +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2021, Autonomous Space Robotics Lab (ASRL) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * \file change_detection_module.cpp - * \author Yuchen Wu, Autonomous Space Robotics Lab (ASRL) - */ -#include "vtr_lidar/modules/planning/change_detection_module.hpp" - -#include "vtr_lidar/data_types/costmap.hpp" -#include "vtr_lidar/utils/nanoflann_utils.hpp" - -namespace vtr { -namespace lidar { - -namespace { - -template -void computeMeanAndStdDev(const pcl::PointCloud &points, float &mean, - float &std_dev) { - // - const auto sum = std::accumulate( - points.begin(), points.end(), 0.0, - [](const float sum, const PointT &p) { return sum + p.z; }); - mean = sum / points.size(); - // - const auto sq_sum = - std::accumulate(points.begin(), points.end(), 0.0, - [mean](const float sq_sum, const PointT &p) { - return sq_sum + (p.z - mean) * (p.z - mean); - }); - const auto var = sq_sum / points.size(); - std_dev = var; -} - -#if false -template -class DetectChangeOp { - public: - DetectChangeOp(const pcl::PointCloud &points, - const std::vector &distances, - const std::vector &roughnesses, - const float &search_radius) - : points_(points), - distances_(distances), - roughnesses_(roughnesses), - sq_search_radius_(search_radius * search_radius), - adapter_(points) { - /// create kd-tree of the point cloud for radius search - kdtree_ = std::make_unique>(2, adapter_, - KDTreeParams(/* max leaf */ 10)); - kdtree_->buildIndex(); - // search params setup - search_params_.sorted = false; - } - - void operator()(const Eigen::Vector2f &point, float &value) const { - /// find the nearest neighbors - std::vector> inds_dists; - size_t num_neighbors = kdtree_->radiusSearch( - point.data(), sq_search_radius_, inds_dists, search_params_); - - if (num_neighbors < 1) { -#if false - CLOG(WARNING, "lidar.change_detection") - << "looking at point: <" << x << "," << y << ">, roughness: 0" - << " (no enough neighbors)"; -#endif - value = 10.0; /// \todo arbitrary high value - return; - } - std::vector distances; - std::vector roughnesses; - std::vector neg_logprobs; - 
distances.reserve(num_neighbors); - roughnesses.reserve(num_neighbors); - neg_logprobs.reserve(num_neighbors); - for (size_t i = 0; i < num_neighbors; ++i) { - distances.emplace_back(distances_[inds_dists[i].first]); - roughnesses.emplace_back(std::sqrt(roughnesses_[inds_dists[i].first])); - - const auto &dist = distances_[inds_dists[i].first]; - const auto &rough = roughnesses_[inds_dists[i].first]; - // std::pow(dist, 2) / rough / 2 + std::log(std::sqrt(rough)) - neg_logprobs.emplace_back(std::pow(dist, 2) / (rough + 0.01) / 2.0); - } - // CLOG(DEBUG, "lidar.change_detection") - // << "\n Distance is " << distances << "\n roughness is " << - // roughnesses; - // use the negative log probability as the cost - // value = *std::max_element(neg_logprobs.begin(), neg_logprobs.end()); - value = neg_logprobs[(size_t)std::floor((float)neg_logprobs.size() / 2)]; - } - - private: - /** \brief reference to the point cloud */ - const pcl::PointCloud &points_; - const std::vector &distances_; - const std::vector &roughnesses_; - - /** \brief squared search radius */ - const float sq_search_radius_; - - KDTreeSearchParams search_params_; - NanoFLANNAdapter adapter_; - std::unique_ptr> kdtree_; -}; -#endif -} // namespace - -using namespace tactic; - -auto ChangeDetectionModule::Config::fromROS(const rclcpp::Node::SharedPtr &node, - const std::string ¶m_prefix) - -> ConstPtr { - auto config = std::make_shared(); - // clang-format off - // change detection - config->detection_range = node->declare_parameter(param_prefix + "detection_range", config->detection_range); - config->search_radius = node->declare_parameter(param_prefix + ".search_radius", config->search_radius); - // cost map - config->resolution = node->declare_parameter(param_prefix + ".resolution", config->resolution); - config->size_x = node->declare_parameter(param_prefix + ".size_x", config->size_x); - config->size_y = node->declare_parameter(param_prefix + ".size_y", config->size_y); - // general - 
config->run_online = node->declare_parameter(param_prefix + ".run_online", config->run_online); - config->run_async = node->declare_parameter(param_prefix + ".run_async", config->run_async); - config->visualize = node->declare_parameter(param_prefix + ".visualize", config->visualize); - config->save_module_result = node->declare_parameter(param_prefix + ".save_module_result", config->save_module_result); - // clang-format on - return config; -} - -void ChangeDetectionModule::run_(QueryCache &qdata0, OutputCache &output0, - const Graph::Ptr &graph, - const TaskExecutor::Ptr &executor) { - auto &qdata = dynamic_cast(qdata0); - // auto &output = dynamic_cast(output0); - - const auto &vid_loc = *qdata.vid_loc; - - if (config_->run_async) - executor->dispatch(std::make_shared( - shared_from_this(), qdata0.shared_from_this(), 0, Task::DepIdSet{}, - Task::DepId{}, "Change Detection", vid_loc)); - else - runAsync_(qdata0, output0, graph, executor, Task::Priority(-1), - Task::DepId()); -} - -void ChangeDetectionModule::runAsync_(QueryCache &qdata0, OutputCache &output0, - const Graph::Ptr &graph, - const TaskExecutor::Ptr &, - const Task::Priority &, - const Task::DepId &) { - auto &qdata = dynamic_cast(qdata0); - auto &output = dynamic_cast(output0); - - if (config_->run_online && - output.chain->trunkSequenceId() != *qdata.sid_loc) { - CLOG(INFO, "lidar.change_detection") - << "Trunk id has changed, skip change detection for this scan"; - return; - } - - /// save module result setup - // centroid -> x, y, z - // normal -> normal_x, normal_y, normal_z - // query -> flex11, flex12, flex13 - // distance (point to plane) -> flex21 - // roughness -> flex22 - // number of observations -> flex23 - // fake point? 
-> flex24 (1 yes, 0 no) - pcl::PointCloud module_result; - if (config_->save_module_result) module_result.reserve(5000); - - /// visualization setup - if (config_->visualize) { - std::unique_lock lock(mutex_); - if (!publisher_initialized_) { - // clang-format off - tf_bc_ = std::make_shared(*qdata.node); - scan_pub_ = qdata.node->create_publisher("change_detection_scan", 5); - map_pub_ = qdata.node->create_publisher("change_detection_map", 5); - costmap_pub_ = qdata.node->create_publisher("change_detection_costmap", 5); - pointcloud_pub_ = qdata.node->create_publisher("change_detection_pointcloud", 5); - // clang-format on - publisher_initialized_ = true; - } - } - - // inputs - const auto &stamp = *qdata.stamp; - const auto &T_s_r = *qdata.T_s_r; - // const auto &vid_loc = *qdata.vid_loc; - // const auto &sid_loc = *qdata.sid_loc; - const auto &T_r_v_loc = *qdata.T_r_v_loc; - const auto &points = *qdata.undistorted_point_cloud; - const auto &submap_loc = *qdata.submap_loc; - const auto &map_point_cloud = submap_loc.point_cloud(); - const auto &T_v_m_loc = *qdata.T_v_m_loc; - - CLOG(INFO, "lidar.change_detection") - << "Change detection for lidar scan at stamp: " << stamp; - - // filter out points that are too far away - std::vector query_indices; - for (size_t i = 0; i < points.size(); ++i) { - const auto &pt = points.at(i); - if (pt.getVector3fMap().norm() < config_->detection_range) - query_indices.emplace_back(i); - } - pcl::PointCloud query_points(points, query_indices); - - // Eigen matrix of original data (only shallow copy of ref clouds) - // clang-format off - const auto query_mat = query_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); - const auto query_norms_mat = query_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::normal_offset()); - // clang-format on - - // retrieve the pre-processed scan and convert it to the localization frame - pcl::PointCloud aligned_points(query_points); - // clang-format off 
- auto aligned_mat = aligned_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); - auto aligned_norms_mat = aligned_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::normal_offset()); - // clang-format on - - const auto T_m_s = (T_s_r * T_r_v_loc * T_v_m_loc).inverse().matrix(); - aligned_mat = T_m_s.cast() * query_mat; - aligned_norms_mat = T_m_s.cast() * query_norms_mat; - - // create kd-tree of the map (2d only) - NanoFLANNAdapter adapter(map_point_cloud); - KDTreeSearchParams search_params; - KDTreeParams tree_params(10 /* max leaf */); - auto kdtree = - std::make_unique>(2, adapter, tree_params); - kdtree->buildIndex(); - - std::vector nn_dists(aligned_points.size(), -1.0f); - // compute point to plane distance - const auto sq_search_radius = config_->search_radius * config_->search_radius; - std::vector roughnesses(aligned_points.size(), 0.0f); - for (size_t i = 0; i < aligned_points.size(); i++) { - // dump result - if (config_->save_module_result) { - auto &p = module_result.emplace_back(); - // set query point location info - p.flex11 = aligned_points[i].x; - p.flex12 = aligned_points[i].y; - p.flex13 = aligned_points[i].z; - // - p.flex14 = -1.0; // invalid normal agreement - // - p.flex21 = -1.0; // invalid distance - p.flex22 = -1.0; // invalid roughness - p.flex23 = -1.0; // invalid number of observations - p.flex24 = aligned_points[i].flex24; // fake point copied directly - } - - // radius search of the closest point - std::vector> inds_dists; - kdtree->radiusSearch(aligned_points[i].data, sq_search_radius, inds_dists, - search_params); - std::vector indices; - indices.reserve(inds_dists.size()); - for (const auto &ind_dist : inds_dists) indices.push_back(ind_dist.first); - - // filter based on neighbors in map /// \todo parameters - if (indices.size() < 10) continue; - - // compute the planar distance - float mean; - computeMeanAndStdDev( - pcl::PointCloud(map_point_cloud, indices), mean, - 
roughnesses[i]); - - const auto diff = aligned_points[i].z - mean; - nn_dists[i] = std::abs(diff); - - // save module result - if (config_->save_module_result) { - auto &p = module_result.back(); - // set query point location info - // clang-format off - p.getVector3fMap() = Eigen::Vector3f{aligned_points[i].x, aligned_points[i].y, mean}; - p.getNormalVector3fMap() = Eigen::Vector3f{0, 0, 1}; - p.flex21 = nn_dists[i]; - p.flex22 = roughnesses[i]; - p.flex23 = static_cast(indices.size()); - // clang-format on - } - } - - for (size_t i = 0; i < aligned_points.size(); i++) { - aligned_points[i].flex23 = 0.0f; - if (nn_dists[i] < 0.0 || (std::pow(nn_dists[i], 2) > roughnesses[i])) - aligned_points[i].flex23 = 1.0f; - } - - // retrieve the pre-processed scan and convert it to the robot frame - pcl::PointCloud aligned_points2(aligned_points); - // clang-format off - auto aligned_mat2 = aligned_points2.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); - auto aligned_norms_mat2 = aligned_points2.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::normal_offset()); - // clang-format on - - const auto T_r_m = (T_m_s * T_s_r).inverse().matrix(); - aligned_mat2 = T_r_m.cast() * aligned_mat; - aligned_norms_mat2 = T_r_m.cast() * aligned_norms_mat; - -#if false - // project to 2d and construct the grid map - const auto costmap = std::make_shared( - config_->resolution, config_->size_x, config_->size_y); - // update cost map based on change detection result - DetectChangeOp detect_change_op( - aligned_points2, nn_dists, nn_roughnesses, config_->search_radius); - costmap->update(detect_change_op); - // add transform to the localization vertex - costmap->T_vertex_this() = T_r_v_loc.inverse(); - costmap->vertex_id() = vid_loc; - costmap->vertex_sid() = sid_loc; -#endif - - /// - if (config_->save_module_result) { - // save result - CLOG(INFO, "lidar.change_detection") << "Saving change detection result"; - const auto ros_msg = std::make_shared(); - 
pcl::toROSMsg(module_result, *ros_msg); - using PoincCloudMsgLM = storage::LockableMessage; - auto msg = std::make_shared(ros_msg, *qdata.stamp); - graph->write("change_detection_result", - "sensor_msgs/msg/PointCloud2", msg); - } - - /// publish the transformed pointcloud - if (config_->visualize) { - std::unique_lock lock(mutex_); - // - const auto T_w_v_loc = config_->run_online - ? output.chain->pose(*qdata.sid_loc) // online - : T_v_m_loc.inverse(); // offline - - if (!config_->run_online) { - // publish the aligned points - PointCloudMsg scan_msg; - pcl::toROSMsg(aligned_points, scan_msg); - scan_msg.header.frame_id = "world"; - // scan_msg.header.stamp = rclcpp::Time(*qdata.stamp); - scan_pub_->publish(scan_msg); - - // publish the submap for localization - PointCloudMsg submap_msg; - pcl::toROSMsg(map_point_cloud, submap_msg); - submap_msg.header.frame_id = "world (offset)"; - // submap_msg.header.stamp = rclcpp::Time(*qdata.stamp); - map_pub_->publish(submap_msg); - } -#if false - // publish the occupancy grid origin - Eigen::Affine3d T((T_w_v_loc * T_r_v_loc.inverse()).matrix()); - auto tf_msg = tf2::eigenToTransform(T); - tf_msg.header.frame_id = "world (offset)"; - // tf_msg.header.stamp = rclcpp::Time(*qdata.stamp); - tf_msg.child_frame_id = "change detection"; - tf_bc_->sendTransform(tf_msg); - - // publish the occupancy grid - auto costmap_msg = costmap->toCostMapMsg(); - costmap_msg.header.frame_id = "change detection"; - // costmap_msg.header.stamp = rclcpp::Time(*qdata.stamp); - costmap_pub_->publish(costmap_msg); - - // publish the point cloud - auto pointcloud_msg = costmap->toPointCloudMsg(); - pointcloud_msg.header.frame_id = "change detection"; - // pointcloud_msg.header.stamp = rclcpp::Time(*qdata.stamp); - pointcloud_pub_->publish(pointcloud_msg); -#endif - } - - /// output -#if false - auto change_detection_costmap_ref = output.change_detection_costmap.locked(); - auto &change_detection_costmap = change_detection_costmap_ref.get(); - 
change_detection_costmap = costmap; -#endif - - CLOG(INFO, "lidar.change_detection") - << "Change detection for lidar scan at stamp: " << stamp << " - DONE"; -} - -} // namespace lidar -} // namespace vtr \ No newline at end of file diff --git a/main/src/vtr_lidar/src/modules/planning/change_detection_module_v2.cpp b/main/src/vtr_lidar/src/modules/planning/change_detection_module_v2.cpp deleted file mode 100644 index de30d0964..000000000 --- a/main/src/vtr_lidar/src/modules/planning/change_detection_module_v2.cpp +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright 2021, Autonomous Space Robotics Lab (ASRL) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/** - * \file change_detection_module_v2.cpp - * \author Yuchen Wu, Autonomous Space Robotics Lab (ASRL) - */ -#include "vtr_lidar/modules/planning/change_detection_module_v2.hpp" - -#include "pcl/features/normal_3d.h" - -#include "vtr_lidar/data_types/costmap.hpp" -#include "vtr_lidar/utils/nanoflann_utils.hpp" - -namespace vtr { -namespace lidar { - -namespace { - -template -void computeCentroidAndNormal(const pcl::PointCloud &points, - Eigen::Vector3f ¢roid, - Eigen::Vector3f &normal, float &roughness) { - // 16-bytes aligned placeholder for the XYZ centroid of a surface patch - Eigen::Vector4f centroid_homo; - // Placeholder for the 3x3 covariance matrix at each surface patch - Eigen::Matrix3f cov; - - // Estimate the XYZ centroid - pcl::compute3DCentroid(points, centroid_homo); - - // Compute the 3x3 covariance matrix - pcl::computeCovarianceMatrix(points, centroid_homo, cov); - - // Compute pca - Eigen::SelfAdjointEigenSolver es; - es.compute(cov); - - // save results - centroid = centroid_homo.head<3>(); - normal = es.eigenvectors().col(0); - roughness = es.eigenvalues()(0); // variance -} - -#if false -template -class DetectChangeOp { - public: - DetectChangeOp(const pcl::PointCloud &points, - const std::vector &distances, - const std::vector &roughnesses, - const float &search_radius) - : points_(points), - distances_(distances), - roughnesses_(roughnesses), - sq_search_radius_(search_radius * search_radius), - adapter_(points) { - /// create kd-tree of the point cloud for radius search - kdtree_ = std::make_unique>(2, adapter_, - KDTreeParams(/* max leaf */ 10)); - kdtree_->buildIndex(); - // search params setup - search_params_.sorted = false; - } - - void operator()(const Eigen::Vector2f &point, float &value) const { - /// find the nearest neighbors - std::vector> inds_dists; - size_t num_neighbors = kdtree_->radiusSearch( - point.data(), sq_search_radius_, inds_dists, search_params_); - - if (num_neighbors < 1) { -#if false - CLOG(WARNING, 
"lidar.change_detection") - << "looking at point: <" << x << "," << y << ">, roughness: 0" - << " (no enough neighbors)"; -#endif - value = 10.0; /// \todo arbitrary high value - return; - } - std::vector distances; - std::vector roughnesses; - std::vector neg_logprobs; - distances.reserve(num_neighbors); - roughnesses.reserve(num_neighbors); - neg_logprobs.reserve(num_neighbors); - for (size_t i = 0; i < num_neighbors; ++i) { - distances.emplace_back(distances_[inds_dists[i].first]); - roughnesses.emplace_back(std::sqrt(roughnesses_[inds_dists[i].first])); - - const auto &dist = distances_[inds_dists[i].first]; - const auto &rough = roughnesses_[inds_dists[i].first]; - // std::pow(dist, 2) / rough / 2 + std::log(std::sqrt(rough)) - neg_logprobs.emplace_back(std::pow(dist, 2) / (rough + 0.01) / 2.0); - } - // CLOG(DEBUG, "lidar.change_detection") - // << "\n Distance is " << distances << "\n roughness is " << - // roughnesses; - // use the negative log probability as the cost - // value = *std::max_element(neg_logprobs.begin(), neg_logprobs.end()); - value = neg_logprobs[(size_t)std::floor((float)neg_logprobs.size() / 2)]; - } - - private: - /** \brief reference to the point cloud */ - const pcl::PointCloud &points_; - const std::vector &distances_; - const std::vector &roughnesses_; - - /** \brief squared search radius */ - const float sq_search_radius_; - - KDTreeSearchParams search_params_; - NanoFLANNAdapter adapter_; - std::unique_ptr> kdtree_; -}; -#endif -} // namespace - -using namespace tactic; - -auto ChangeDetectionModuleV2::Config::fromROS( - const rclcpp::Node::SharedPtr &node, const std::string ¶m_prefix) - -> ConstPtr { - auto config = std::make_shared(); - // clang-format off - // change detection - config->detection_range = node->declare_parameter(param_prefix + ".detection_range", config->detection_range); - config->search_radius = node->declare_parameter(param_prefix + ".search_radius", config->search_radius); - config->negprob_threshold = 
node->declare_parameter(param_prefix + ".negprob_threshold", config->negprob_threshold); - // prior on roughness - config->use_prior = node->declare_parameter(param_prefix + ".use_prior", config->use_prior); - config->alpha0 = node->declare_parameter(param_prefix + ".alpha0", config->alpha0); - config->beta0 = node->declare_parameter(param_prefix + ".beta0", config->beta0); - // support - config->use_support_filtering = node->declare_parameter(param_prefix + ".use_support_filtering", config->use_support_filtering); - config->support_radius = node->declare_parameter(param_prefix + ".support_radius", config->support_radius); - config->support_variance = node->declare_parameter(param_prefix + ".support_variance", config->support_variance); - config->support_threshold = node->declare_parameter(param_prefix + ".support_threshold", config->support_threshold); - // cost map - config->resolution = node->declare_parameter(param_prefix + ".resolution", config->resolution); - config->size_x = node->declare_parameter(param_prefix + ".size_x", config->size_x); - config->size_y = node->declare_parameter(param_prefix + ".size_y", config->size_y); - // general - config->run_online = node->declare_parameter(param_prefix + ".run_online", config->run_online); - config->run_async = node->declare_parameter(param_prefix + ".run_async", config->run_async); - config->visualize = node->declare_parameter(param_prefix + ".visualize", config->visualize); - config->save_module_result = node->declare_parameter(param_prefix + ".save_module_result", config->save_module_result); - // clang-format on - return config; -} - -void ChangeDetectionModuleV2::run_(QueryCache &qdata0, OutputCache &output0, - const Graph::Ptr &graph, - const TaskExecutor::Ptr &executor) { - auto &qdata = dynamic_cast(qdata0); - // auto &output = dynamic_cast(output0); - - const auto &vid_loc = *qdata.vid_loc; - - if (config_->run_async) - executor->dispatch(std::make_shared( - shared_from_this(), qdata0.shared_from_this(), 
0, Task::DepIdSet{}, - Task::DepId{}, "Change Detection", vid_loc)); - else - runAsync_(qdata0, output0, graph, executor, Task::Priority(-1), - Task::DepId()); -} - -void ChangeDetectionModuleV2::runAsync_( - QueryCache &qdata0, OutputCache &output0, const Graph::Ptr &graph, - const TaskExecutor::Ptr &, const Task::Priority &, const Task::DepId &) { - auto &qdata = dynamic_cast(qdata0); - auto &output = dynamic_cast(output0); - - if (config_->run_online && - output.chain->trunkSequenceId() != *qdata.sid_loc) { - CLOG(INFO, "lidar.change_detection") - << "Trunk id has changed, skip change detection for this scan"; - return; - } - - /// save module result setup - // centroid -> x, y, z - // normal -> normal_x, normal_y, normal_z - // query -> flex11, flex12, flex13 - // distance (point to plane) -> flex21 - // roughness -> flex22 - // number of observations -> flex23 - // fake point? -> flex24 (1 yes, 0 no) - pcl::PointCloud module_result; - if (config_->save_module_result) module_result.reserve(5000); - - /// visualization setup - if (config_->visualize) { - std::unique_lock lock(mutex_); - if (!publisher_initialized_) { - // clang-format off - tf_bc_ = std::make_shared(*qdata.node); - scan_pub_ = qdata.node->create_publisher("change_detection_scan", 5); - map_pub_ = qdata.node->create_publisher("change_detection_map", 5); - costmap_pub_ = qdata.node->create_publisher("change_detection_costmap", 5); - costpcd_pub_ = qdata.node->create_publisher("change_detection_costpcd", 5); - // clang-format on - publisher_initialized_ = true; - } - } - - // inputs - const auto &stamp = *qdata.stamp; - const auto &T_s_r = *qdata.T_s_r; - // const auto &vid_loc = *qdata.vid_loc; - // const auto &sid_loc = *qdata.sid_loc; - const auto &T_r_v_loc = *qdata.T_r_v_loc; - const auto &points = *qdata.undistorted_point_cloud; - const auto &submap_loc = *qdata.submap_loc; - const auto &map_point_cloud = submap_loc.point_cloud(); - const auto &T_v_m_loc = *qdata.T_v_m_loc; - - // 
clang-format off - CLOG(INFO, "lidar.change_detection") << "Change detection for lidar scan at stamp: " << stamp; - - // filter out points that are too far away - std::vector query_indices; - for (size_t i = 0; i < points.size(); ++i) { - const auto &pt = points.at(i); - if (pt.getVector3fMap().norm() < config_->detection_range) - query_indices.emplace_back(i); - } - pcl::PointCloud query_points(points, query_indices); - - // Eigen matrix of original data (only shallow copy of ref clouds) - const auto query_mat = query_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); - const auto query_norms_mat = query_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::normal_offset()); - - // retrieve the pre-processed scan and convert it to the localization frame - pcl::PointCloud aligned_points(query_points); - auto aligned_mat = aligned_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); - auto aligned_norms_mat = aligned_points.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::normal_offset()); - - const auto T_m_s = (T_s_r * T_r_v_loc * T_v_m_loc).inverse().matrix(); - aligned_mat = T_m_s.cast() * query_mat; - aligned_norms_mat = T_m_s.cast() * query_norms_mat; - - // create kd-tree of the map - NanoFLANNAdapter adapter(map_point_cloud); - KDTreeSearchParams search_params; - KDTreeParams tree_params(10 /* max leaf */); - auto kdtree = std::make_unique>(3, adapter, tree_params); - kdtree->buildIndex(); - - std::vector nn_inds(aligned_points.size()); - std::vector nn_dists(aligned_points.size(), -1.0f); - // compute nearest neighbors and point to point distances - for (size_t i = 0; i < aligned_points.size(); i++) { - KDTreeResultSet result_set(1); - result_set.init(&nn_inds[i], &nn_dists[i]); - kdtree->findNeighbors(result_set, aligned_points[i].data, search_params); - } - // compute point to plane distance - const auto sq_search_radius = config_->search_radius * config_->search_radius; - 
std::vector roughnesses(aligned_points.size(), 0.0f); - std::vector num_measurements(aligned_points.size(), 0.0f); - for (size_t i = 0; i < aligned_points.size(); i++) { - // dump result - if (config_->save_module_result) { - auto &p = module_result.emplace_back(); - // set query point location info - p.flex11 = aligned_points[i].x; - p.flex12 = aligned_points[i].y; - p.flex13 = aligned_points[i].z; - // - p.flex21 = -1.0; // invalid distance - p.flex22 = -1.0; // invalid roughness - p.flex23 = -1.0; // invalid number of observations - p.flex24 = aligned_points[i].flex24; // fake point copied directly - } - - // radius search of the closest point - std::vector dists; - std::vector indices; - NanoFLANNRadiusResultSet result(sq_search_radius, dists, indices); - kdtree->radiusSearchCustomCallback(map_point_cloud[nn_inds[i]].data, result, search_params); - - // filter based on neighbors in map /// \todo parameters - if (indices.size() < 10) continue; - - // - num_measurements[i] = static_cast(indices.size()); - - // compute the planar distance - Eigen::Vector3f centroid, normal; - computeCentroidAndNormal(pcl::PointCloud(map_point_cloud, indices), centroid, normal, roughnesses[i]); - - const auto diff = aligned_points[i].getVector3fMap() - centroid; - nn_dists[i] = std::abs(diff.dot(normal)); - - // save module result - if (config_->save_module_result) { - auto &p = module_result.back(); - // set query point location info - // clang-format off - p.getVector3fMap() = centroid; - p.getNormalVector3fMap() = normal; - // - p.flex21 = nn_dists[i]; - p.flex22 = roughnesses[i]; - p.flex23 = static_cast(indices.size()); - // clang-format on - } - } - - for (size_t i = 0; i < aligned_points.size(); i++) { - aligned_points[i].flex23 = 0.0f; - // - const auto cost = [&]() -> float { - // clang-format off - if (config_->use_prior) { - const float alpha_n = config_->alpha0 + num_measurements[i] / 2.0f; - const float beta_n = config_->beta0 + roughnesses[i] * num_measurements[i] / 
2.0f; - const float roughness = beta_n / alpha_n; - const float df = 2 * alpha_n; - const float sqdists = nn_dists[i] * nn_dists[i] / roughness; - return -std::log(std::pow(1 + sqdists / df, -(df + 1) / 2)); - } else { - const float roughness = roughnesses[i]; - return (nn_dists[i] * nn_dists[i]) / (2 * roughness) + std::log(std::sqrt(roughness)); - } - // clang-format on - }(); - // - if (nn_dists[i] < 0.0 || (cost > config_->negprob_threshold)) - aligned_points[i].flex23 = 1.0f; - } - - // add support region - if (config_->use_support_filtering) { - // create kd-tree of the aligned points - NanoFLANNAdapter query_adapter(aligned_points); - KDTreeSearchParams search_params; - KDTreeParams tree_params(/* max leaf */ 10); - auto query_kdtree = - std::make_unique>(3, query_adapter, tree_params); - query_kdtree->buildIndex(); - // - std::vector toremove; - toremove.reserve(100); - const float sq_support_radius = std::pow(config_->support_radius, 2); - for (size_t i = 0; i < aligned_points.size(); i++) { - // ignore non-change points - if (aligned_points[i].flex23 == 0.0f) continue; - - // - std::vector> inds_dists; - inds_dists.reserve(10); - query_kdtree->radiusSearch(aligned_points[i].data, sq_support_radius, - inds_dists, search_params); - // - float support = 0.0f; - for (const auto &ind_dist : inds_dists) { - if (ind_dist.first == i) continue; - support += aligned_points[ind_dist.first].flex23 * - std::exp(-ind_dist.second / (2 * config_->support_variance)); - } - // - if (support < config_->support_threshold) toremove.push_back(i); - } - // change back to non-change points - for (const auto &i : toremove) aligned_points[i].flex23 = 0.0f; - } - - // retrieve the pre-processed scan and convert it to the robot frame - pcl::PointCloud aligned_points2(aligned_points); - // clang-format off - auto aligned_mat2 = aligned_points2.getMatrixXfMap(4, PointWithInfo::size(), PointWithInfo::cartesian_offset()); - auto aligned_norms_mat2 = aligned_points2.getMatrixXfMap(4, 
PointWithInfo::size(), PointWithInfo::normal_offset()); - // clang-format on - - const auto T_r_m = (T_m_s * T_s_r).inverse().matrix(); - aligned_mat2 = T_r_m.cast() * aligned_mat; - aligned_norms_mat2 = T_r_m.cast() * aligned_norms_mat; - -#if false - // project to 2d and construct the grid map - const auto costmap = std::make_shared(config_->resolution, config_->size_x, config_->size_y); - // update cost map based on change detection result - DetectChangeOp detect_change_op(aligned_points2, nn_dists, nn_roughnesses, config_->search_radius); - costmap->update(detect_change_op); - // add transform to the localization vertex - costmap->T_vertex_this() = T_r_v_loc.inverse(); - costmap->vertex_id() = vid_loc; - costmap->vertex_sid() = sid_loc; -#endif - - /// - if (config_->save_module_result) { - // save result - CLOG(INFO, "lidar.change_detection") << "Saving change detection result"; - const auto ros_msg = std::make_shared(); - pcl::toROSMsg(module_result, *ros_msg); - using PoincCloudMsgLM = storage::LockableMessage; - auto msg = std::make_shared(ros_msg, *qdata.stamp); - graph->write("change_detection_v2_result", - "sensor_msgs/msg/PointCloud2", msg); - } - - /// publish the transformed pointcloud - if (config_->visualize) { - std::unique_lock lock(mutex_); - // - const auto T_w_v_loc = config_->run_online - ? 
output.chain->pose(*qdata.sid_loc) // online - : T_v_m_loc.inverse(); // offline - - if (!config_->run_online) { - // publish the aligned points - PointCloudMsg scan_msg; - pcl::toROSMsg(aligned_points, scan_msg); - scan_msg.header.frame_id = "world"; - // scan_msg.header.stamp = rclcpp::Time(*qdata.stamp); - scan_pub_->publish(scan_msg); - - // publish the submap for localization - PointCloudMsg submap_msg; - pcl::toROSMsg(map_point_cloud, submap_msg); - submap_msg.header.frame_id = "world"; - // submap_msg.header.stamp = rclcpp::Time(*qdata.stamp); - map_pub_->publish(submap_msg); - } -#if false - // publish the occupancy grid origin - Eigen::Affine3d T((T_w_v_loc * T_r_v_loc.inverse()).matrix()); - auto tf_msg = tf2::eigenToTransform(T); - tf_msg.header.frame_id = "world (offset)"; - // tf_msg.header.stamp = rclcpp::Time(*qdata.stamp); - tf_msg.child_frame_id = "change detection"; - tf_bc_->sendTransform(tf_msg); - - // publish the occupancy grid - auto costmap_msg = costmap->toCostMapMsg(); - costmap_msg.header.frame_id = "change detection"; - // costmap_msg.header.stamp = rclcpp::Time(*qdata.stamp); - costmap_pub_->publish(costmap_msg); - - // publish the point cloud - auto pointcloud_msg = costmap->toPointCloudMsg(); - pointcloud_msg.header.frame_id = "change detection"; - // pointcloud_msg.header.stamp = rclcpp::Time(*qdata.stamp); - costpcd_pub_->publish(pointcloud_msg); -#endif - } - - /// output -#if false - auto change_detection_costmap_ref = output.change_detection_costmap.locked(); - auto &change_detection_costmap = change_detection_costmap_ref.get(); - change_detection_costmap = costmap; -#endif - - CLOG(INFO, "lidar.change_detection") - << "Change detection for lidar scan at stamp: " << stamp << " - DONE"; -} - -} // namespace lidar -} // namespace vtr \ No newline at end of file diff --git a/main/src/vtr_lidar/src/modules/planning/fake_obstacle_module.cpp b/main/src/vtr_lidar/src/modules/planning/fake_obstacle_module.cpp deleted file mode 100644 
index 98a598d13..000000000 --- a/main/src/vtr_lidar/src/modules/planning/fake_obstacle_module.cpp +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2021, Autonomous Space Robotics Lab (ASRL) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/** - * \file fake_obstacle_module.cpp - * \author Yuchen Wu, Autonomous Space Robotics Lab (ASRL) - */ -#include "vtr_lidar/modules/planning/fake_obstacle_module.hpp" - -namespace vtr { -namespace lidar { - -using namespace tactic; - -auto FakeObstacleModule::Config::fromROS(const rclcpp::Node::SharedPtr &node, - const std::string ¶m_prefix) - -> ConstPtr { - auto config = std::make_shared(); - // clang-format off - // object path - config->path = node->declare_parameter(param_prefix + ".path", std::string{}); - config->objs = node->declare_parameter>(param_prefix + ".objs", std::vector{}); - // object locations - config->fixed_types = node->declare_parameter>(param_prefix + ".types", std::vector{}); - config->fixed_xs = node->declare_parameter>(param_prefix + ".xs", std::vector{}); - config->fixed_ys = node->declare_parameter>(param_prefix + ".ys", std::vector{}); - config->fixed_zs = node->declare_parameter>(param_prefix + ".zs", std::vector{}); - config->fixed_rolls = node->declare_parameter>(param_prefix + ".rolls", std::vector{}); - config->fixed_pitchs = node->declare_parameter>(param_prefix + ".pitchs", std::vector{}); - config->fixed_yaws = node->declare_parameter>(param_prefix + ".yaws", std::vector{}); - // 
general - config->visualize = node->declare_parameter(param_prefix + ".visualize", config->visualize); - // clang-format on - return config; -} - -FakeObstacleModule::FakeObstacleModule( - const Config::ConstPtr &config, - const std::shared_ptr &module_factory, - const std::string &name) - : tactic::BaseModule{module_factory, name}, config_(config) { - // hard-coded honeycomb - mesh2pcd::Mesh2PcdConverter::Config m2p_config; - m2p_config.theta_min = 1.204; - m2p_config.theta_res = 0.013; - m2p_config.theta_max = 2.862; - m2p_config.phi_min = -1.833; - m2p_config.phi_max = 1.833; - m2p_config.phi_res = 0.021; - m2p_config.range_min = 2.0; - m2p_config.range_max = 40.0; - - for (const auto &obj : config_->objs) { - const std::string filename = config_->path + "/" + obj + ".obj"; - CLOG(WARNING, "test") << "Loading obj file: " << filename; - converters_.emplace_back(filename, m2p_config); - } - - const auto num_fixed_objs = config_->fixed_types.size(); - for (size_t i = 0; i < num_fixed_objs; ++i) { - Eigen::Matrix T_scan_obj_vec; - // clang-format off - T_scan_obj_vec << config_->fixed_xs.at(i), - config_->fixed_ys.at(i), - config_->fixed_zs.at(i), - config_->fixed_rolls.at(i), - config_->fixed_pitchs.at(i), - config_->fixed_yaws.at(i); - // clang-format on - const auto T_scan_obj = lgmath::se3::vec2tran(T_scan_obj_vec); - obj_T_scan_objs_.emplace_back(config_->fixed_types.at(i), - T_scan_obj.cast()); - } -} - -void FakeObstacleModule::run_(QueryCache &qdata0, OutputCache & /* output0 */, - const Graph::Ptr & /* graph */, - const TaskExecutor::Ptr & /* executor */) { - auto &qdata = dynamic_cast(qdata0); - - if (config_->visualize && !publisher_initialized_) { - // clang-format off - pointcloud_pub_ = qdata.node->create_publisher("fake_obstacle_scan", 5); - // clang-format on - publisher_initialized_ = true; - } - - auto point_cloud = std::make_shared>( - *qdata.undistorted_point_cloud); - - for (size_t i = 0; i < obj_T_scan_objs_.size(); ++i) { - const auto &obj = 
obj_T_scan_objs_.at(i).first; - const auto &T_scan_obj = obj_T_scan_objs_.at(i).second; - converters_.at(obj).addToPcd(*point_cloud, T_scan_obj, i == 0); - } - - qdata.undistorted_point_cloud = point_cloud; - - /// publish the updated pointcloud - if (config_->visualize) { - // remove static points - std::vector indices; - indices.reserve(point_cloud->size()); - for (size_t i = 0; i < point_cloud->size(); ++i) { - /// \todo change flex24 to flex23 - if ((*point_cloud)[i].flex24 > 0.5f) indices.emplace_back(i); - } - pcl::PointCloud filtered_points(*point_cloud, indices); - - // publish the aligned points - PointCloudMsg scan_msg; - pcl::toROSMsg(filtered_points, scan_msg); - scan_msg.header.frame_id = "lidar"; - // scan_msg.header.stamp = rclcpp::Time(*qdata.stamp); - pointcloud_pub_->publish(scan_msg); - } -} - -} // namespace lidar -} // namespace vtr \ No newline at end of file diff --git a/main/src/vtr_lidar/src/modules/preprocessing/preprocessing_module_v2.cpp b/main/src/vtr_lidar/src/modules/preprocessing/preprocessing_module_v2.cpp index f25505fb1..0fdfa524f 100644 --- a/main/src/vtr_lidar/src/modules/preprocessing/preprocessing_module_v2.cpp +++ b/main/src/vtr_lidar/src/modules/preprocessing/preprocessing_module_v2.cpp @@ -105,7 +105,7 @@ void PreprocessingModuleV2::run_(QueryCache &qdata0, OutputCache &, if (config_->visualize) { PointCloudMsg pc2_msg; pcl::toROSMsg(*filtered_point_cloud, pc2_msg); - pc2_msg.header.frame_id = "os_sensor"; //"lidar" for honeycomb + pc2_msg.header.frame_id = "lidar"; //"lidar" for honeycomb pc2_msg.header.stamp = rclcpp::Time(*qdata.stamp); filtered_pub_->publish(pc2_msg); } diff --git a/main/src/vtr_lidar/src/path_planning/cbit.cpp b/main/src/vtr_lidar/src/path_planning/cbit.cpp index 47cecb293..6281208fb 100644 --- a/main/src/vtr_lidar/src/path_planning/cbit.cpp +++ b/main/src/vtr_lidar/src/path_planning/cbit.cpp @@ -77,6 +77,9 @@ auto LidarCBIT::Config::fromROS(const rclcpp::Node::SharedPtr& node, const std:: 
config->forward_vel = node->declare_parameter(prefix + ".mpc.forward_vel", config->forward_vel); config->max_lin_vel = node->declare_parameter(prefix + ".mpc.max_lin_vel", config->max_lin_vel); config->max_ang_vel = node->declare_parameter(prefix + ".mpc.max_ang_vel", config->max_ang_vel); + config->robot_linear_velocity_scale = node->declare_parameter(prefix + ".robot_linear_velocity_scale", config->robot_linear_velocity_scale); + config->robot_angular_velocity_scale = node->declare_parameter(prefix + ".robot_angular_velocity_scale", config->robot_angular_velocity_scale); + // COST FUNCTION Covariances const auto pose_error_diag = node->declare_parameter>(prefix + ".mpc.pose_error_cov", std::vector()); @@ -304,10 +307,10 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { w_p_r_in_r(3) = 0.0; w_p_r_in_r(4) = 0.0; w_p_r_in_r(5) = -1* vel_history.back()[1]; - CLOG(ERROR, "mpc_debug.cbit") << "Robot velocity Used for Extrapolation: " << -w_p_r_in_r.transpose() << std::endl; + CLOG(DEBUG, "mpc_debug.cbit") << "Robot velocity Used for Extrapolation: " << -w_p_r_in_r.transpose() << std::endl; Eigen::Matrix xi_p_r_in_r((dt - (std::floor(dt / control_period) * control_period)) * w_p_r_in_r); T_p_r2 = T_p_r2 * tactic::EdgeTransform(xi_p_r_in_r).inverse(); - CLOG(ERROR, "mpc_debug.cbit") << "The final time period is: " << (dt - (std::floor(dt / control_period) * control_period)); + CLOG(DEBUG, "mpc_debug.cbit") << "The final time period is: " << (dt - (std::floor(dt / control_period) * control_period)); const auto T_p_r_extp2 = T_p_r2; CLOG(DEBUG, "mpc_debug.cbit") << "New extrapolated pose:" << T_p_r_extp2; @@ -501,7 +504,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { applied_vel(1) = 0.0; } - CLOG(ERROR, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << applied_vel(1); + CLOG(DEBUG, "mpc.cbit") << "The linear velocity is: " << applied_vel(0) << " The angular vel is: " << 
applied_vel(1); // If required, saturate the output velocity commands based on the configuration limits @@ -521,8 +524,8 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { // return the computed velocity command for the first time step Command command; - command.linear.x = saturated_vel(0) * 1.1; // * 1.1 added to compensate for bad grizzly internal controller set points - command.angular.z = saturated_vel(1); + command.linear.x = saturated_vel(0) * config_->robot_linear_velocity_scale; + command.angular.z = saturated_vel(1) * config_->robot_angular_velocity_scale; // Temporary modification by Jordy to test calibration of the grizzly controller CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); @@ -536,7 +539,7 @@ auto LidarCBIT::computeCommand(RobotState& robot_state0) -> Command { << command.angular.z << "]"; auto command_stop_time = std::chrono::high_resolution_clock::now(); auto duration_command = std::chrono::duration_cast(command_stop_time - command_start_time); - CLOG(ERROR, "mpc.cbit") << "ComputeCommand took: " << duration_command.count() << "ms"; + CLOG(DEBUG, "mpc.cbit") << "ComputeCommand took: " << duration_command.count() << "ms"; return command; } // Otherwise stop the robot diff --git a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp index c6c25de32..d2abae328 100644 --- a/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp +++ b/main/src/vtr_lidar/src/path_planning/mpc_path_planner2.cpp @@ -17,7 +17,7 @@ * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) */ #include "vtr_path_planning/mpc/mpc_path_planner2.hpp" -#include "vtr_path_planning/mpc/custom_steam_evaluators.hpp" +#include "vtr_path_planning/mpc/lateral_error_evaluators.hpp" #include "vtr_path_planning/mpc/custom_loss_functions.hpp" #include "vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp" #include "vtr_lidar/cache.hpp" @@ -261,8 
+261,8 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se //CLOG(INFO, "mpc_debug.cbit") << "The vtr measurement for this value is: " << measurements[i].inverse(); // compute the lateral error using a custom Homogenous point error STEAM evaluator - const auto lat_error_right = steam::stereo::HomoPointErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else - const auto lat_error_left = steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); + const auto lat_error_right = steam::LateralErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else + const auto lat_error_left = steam::LateralErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); // Previously used Log barriers, however due to instability switch to using inverse squared barriers //const auto lat_barrier_right = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_right); diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp index 3cd714fa2..197cfb528 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit.hpp @@ -102,6 +102,8 @@ class CBIT : public BasePathPlanner { double forward_vel = 0.75; double max_lin_vel = 1.25; double max_ang_vel = 0.75; + double robot_linear_velocity_scale = 1.0; + double robot_angular_velocity_scale = 1.0; // Add unicycle model param diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_config.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_config.hpp index acd088baf..3538fc63e 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_config.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_config.hpp @@ -63,6 +63,9 @@ class CBITConfig { double forward_vel 
= 0.75; double max_lin_vel = 1.25; double max_ang_vel = 0.75; + double robot_linear_velocity_scale = 1.0; + double robot_angular_velocity_scale = 1.0; + // Add unicycle model param // Covariance Tuning Weights diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp index 980681d76..ec7b763bc 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/cbit/cbit_path_planner.hpp @@ -40,6 +40,14 @@ #pragma once // Note long term, this class should probably be inherited by the base path planner + +// enum for path traversal direction: +enum PathDirection +{ + PATH_DIRECTION_REVERSE = -1, + PATH_DIRECTION_FORWARD = 1 +}; + class CBITPlanner { public: CBITConfig conf; @@ -78,7 +86,7 @@ class CBITPlanner { // Costmap pointer std::shared_ptr cbit_costmap_ptr; - CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction); + CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, PathDirection path_direction); protected: struct ChainInfo { @@ -94,10 +102,10 @@ class CBITPlanner { private: void InitializePlanningSpace(); - void Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction); + void Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, PathDirection path_direction); void ResetPlanner(); - void HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr 
costmap_ptr, std::shared_ptr corridor_ptr, double path_direction); - std::shared_ptr UpdateState(double path_direction); + void HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, PathDirection path_direction); + std::shared_ptr UpdateState(PathDirection path_direction); std::vector> SampleBox(int m); std::vector> SampleFreeSpace(int m); double BestVertexQueueValue(); diff --git a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/lateral_error_evaluators.hpp similarity index 70% rename from main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp rename to main/src/vtr_path_planning/include/vtr_path_planning/mpc/lateral_error_evaluators.hpp index 135e80d99..3e85ce126 100644 --- a/main/src/vtr_path_planning/include/vtr_path_planning/mpc/custom_steam_evaluators.hpp +++ b/main/src/vtr_path_planning/include/vtr_path_planning/mpc/lateral_error_evaluators.hpp @@ -13,7 +13,7 @@ // limitations under the License. 
/** - * \file custom_steam_evaluators.hpp + * \file lateral_error_evaluators.hpp * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) */ @@ -22,19 +22,18 @@ #include "steam/evaluable/state_var.hpp" namespace steam { -namespace stereo { -class HomoPointErrorEvaluatorLeft : public Evaluable> { +class LateralErrorEvaluatorLeft : public Evaluable> { public: - using Ptr = std::shared_ptr; - using ConstPtr = std::shared_ptr; + using Ptr = std::shared_ptr; + using ConstPtr = std::shared_ptr; using InType = Eigen::Vector4d; using OutType = Eigen::Matrix; static Ptr MakeShared(const Evaluable::ConstPtr& pt, const InType& meas_pt); - HomoPointErrorEvaluatorLeft(const Evaluable::ConstPtr& pt, + LateralErrorEvaluatorLeft(const Evaluable::ConstPtr& pt, const InType& meas_pt); bool active() const override; @@ -54,25 +53,25 @@ class HomoPointErrorEvaluatorLeft : public Evaluable Eigen::Matrix D_ = Eigen::Matrix::Zero(); }; -HomoPointErrorEvaluatorLeft::Ptr homo_point_error_left( - const Evaluable::ConstPtr& pt, - const HomoPointErrorEvaluatorLeft::InType& meas_pt); +LateralErrorEvaluatorLeft::Ptr homo_point_error_left( + const Evaluable::ConstPtr& pt, + const LateralErrorEvaluatorLeft::InType& meas_pt); -class HomoPointErrorEvaluatorRight : public Evaluable> { +class LateralErrorEvaluatorRight : public Evaluable> { public: - using Ptr = std::shared_ptr; - using ConstPtr = std::shared_ptr; + using Ptr = std::shared_ptr; + using ConstPtr = std::shared_ptr; using InType = Eigen::Vector4d; using OutType = Eigen::Matrix; static Ptr MakeShared(const Evaluable::ConstPtr& pt, const InType& meas_pt); - HomoPointErrorEvaluatorRight(const Evaluable::ConstPtr& pt, + LateralErrorEvaluatorRight(const Evaluable::ConstPtr& pt, const InType& meas_pt); bool active() const override; @@ -92,10 +91,9 @@ class HomoPointErrorEvaluatorRight : public Evaluable D_ = Eigen::Matrix::Zero(); }; -HomoPointErrorEvaluatorRight::Ptr homo_point_error_right( - const Evaluable::ConstPtr& pt, - const 
HomoPointErrorEvaluatorRight::InType& meas_pt); +LateralErrorEvaluatorRight::Ptr homo_point_error_right( + const Evaluable::ConstPtr& pt, + const LateralErrorEvaluatorRight::InType& meas_pt); -} // namespace stereo } // namespace steam diff --git a/main/src/vtr_path_planning/src/cbit/cbit.cpp b/main/src/vtr_path_planning/src/cbit/cbit.cpp index 30a04e637..b17f6c10d 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit.cpp @@ -80,6 +80,9 @@ auto CBIT::Config::fromROS(const rclcpp::Node::SharedPtr& node, const std::strin config->forward_vel = node->declare_parameter(prefix + ".mpc.forward_vel", config->forward_vel); config->max_lin_vel = node->declare_parameter(prefix + ".mpc.max_lin_vel", config->max_lin_vel); config->max_ang_vel = node->declare_parameter(prefix + ".mpc.max_ang_vel", config->max_ang_vel); + config->robot_linear_velocity_scale = node->declare_parameter(prefix + ".robot_linear_velocity_scale", config->robot_linear_velocity_scale); + config->robot_angular_velocity_scale = node->declare_parameter(prefix + ".robot_angular_velocity_scale", config->robot_angular_velocity_scale); + // COST FUNCTION COVARIANCE const auto pose_error_diag = node->declare_parameter>(prefix + ".mpc.pose_error_cov", std::vector()); @@ -257,14 +260,14 @@ void CBIT::initializeRoute(RobotState& robot_state) { CLOG(INFO, "path_planning.cbit") << "The path_yaw is: " << path_yaw; CLOG(INFO, "path_planning.cbit") << "The pose_graph yaw is: " << pose_graph_yaw; // Logic for determining the forward/reverse sign: - double path_direction; //1.0 = forward planning, -1.0 = reverse planning + PathDirection path_direction; //1.0 = forward planning, -1.0 = reverse planning if (abs((abs(path_yaw) - abs(pose_graph_yaw))) > 1.57075) { - path_direction = -1.0; + path_direction = PATH_DIRECTION_REVERSE; } else { - path_direction = 1.0; + path_direction = PATH_DIRECTION_FORWARD; } CLOG(INFO, "path_planning.cbit") << "The path repeat direction is:" << 
path_direction; @@ -521,8 +524,8 @@ auto CBIT::computeCommand(RobotState& robot_state) -> Command { // return the computed velocity command for the first time step Command command; - command.linear.x = saturated_vel(0) * 1.1; // * 1.1 Added to compensate for bad grizzly internal controller config - command.angular.z = saturated_vel(1); + command.linear.x = saturated_vel(0) * config_->robot_linear_velocity_scale; + command.angular.z = saturated_vel(1) * config_->robot_angular_velocity_scale; // Temporary modification by Jordy to test calibration of hte grizzly controller CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Linear Velocity: " << saturated_vel(0); CLOG(DEBUG, "grizzly_controller_tests.cbit") << "Twist Angular Velocity: " << saturated_vel(1); diff --git a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp index a476625fb..a5b70d2a6 100644 --- a/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp +++ b/main/src/vtr_path_planning/src/cbit/cbit_path_planner.cpp @@ -44,7 +44,7 @@ auto CBITPlanner::getChainInfo(vtr::path_planning::BasePathPlanner::RobotState& } // Class Constructor: -CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction) +CBITPlanner::CBITPlanner(CBITConfig conf_in, std::shared_ptr path_in, vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr> path_ptr, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, PathDirection path_direction) { // Setting random seed @@ -142,7 +142,7 @@ void CBITPlanner::InitializePlanningSpace() } // Reset fuction which goes through the entire reset procedure (including calling ResetPlanner and restarting the planner itself) -void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, 
std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction) +void CBITPlanner::HardReset(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, PathDirection path_direction) { // I think we may also want to add a small time delay just so we arent repeatedly spamming an optimal solution CLOG(ERROR, "path_planning.cbit_planner") << "Plan could not be improved, Initiating Reset"; @@ -239,7 +239,7 @@ void CBITPlanner::ResetPlanner() // Main planning function -void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, double path_direction) +void CBITPlanner::Planning(vtr::path_planning::BasePathPlanner::RobotState& robot_state, std::shared_ptr costmap_ptr, std::shared_ptr corridor_ptr, PathDirection path_direction) { // find some better places to initialize these double prev_path_cost = INFINITY; // experimental @@ -1267,7 +1267,7 @@ void CBITPlanner::Prune(double c_best, double c_best_weighted) // Function for updating the state of the robot, updates the goal and sliding window, and prunes the current tree // Note that this stage requires us to convert the state of the robot in euclidean space into a singular curvilinear space pt, which is actually slightly tricky // To do without singularities -std::shared_ptr CBITPlanner::UpdateState(double path_direction) +std::shared_ptr CBITPlanner::UpdateState(PathDirection path_direction) { //std::cout << "The new state is x: " << new_state->x << " y: " << new_state->y << std::endl; @@ -1339,9 +1339,22 @@ std::shared_ptr CBITPlanner::UpdateState(double path_direction) //std::cout << "q_min is: " << q_min << std::endl; // debug; // Note I think we also need to take into account the direction the robot is facing on the path for reverse planning too - q_min = q_min * q_sign * path_direction; + q_min = q_min * q_sign; //std::cout << "q_min is: " << q_min << 
std::endl; // debug; + switch (path_direction) { + case PATH_DIRECTION_REVERSE: + q_min = -q_min; + break; + case PATH_DIRECTION_FORWARD: + // Do nothing, q_min already has the correct sign + break; + default: + // Handle error case where path_direction is not a valid value + break; + } + + // Once we have the closest point on the path, it may not actually be the correct p-value because of singularities in the euclid to curv conversion // We need to use this points p-value as a starting point, then search p, qmin space in either direction discretely and find the point with diff --git a/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp b/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp index 462c6a31f..7e4057780 100644 --- a/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp +++ b/main/src/vtr_path_planning/src/mpc/lateral_error_evaluator.cpp @@ -1,15 +1,14 @@ -#include "vtr_path_planning/mpc/custom_steam_evaluators.hpp" +#include "vtr_path_planning/mpc/lateral_error_evaluators.hpp" #include namespace steam { -namespace stereo { -auto HomoPointErrorEvaluatorRight::MakeShared(const Evaluable::ConstPtr& pt, +auto LateralErrorEvaluatorRight::MakeShared(const Evaluable::ConstPtr& pt, const InType& meas_pt) -> Ptr { - return std::make_shared(pt, meas_pt); + return std::make_shared(pt, meas_pt); } -HomoPointErrorEvaluatorRight::HomoPointErrorEvaluatorRight( +LateralErrorEvaluatorRight::LateralErrorEvaluatorRight( const Evaluable::ConstPtr& pt, const InType& meas_pt) : pt_(pt), meas_pt_(meas_pt) { //D_.block<3, 3>(0, 0) = Eigen::Matrix3d::Identity(); @@ -17,17 +16,17 @@ HomoPointErrorEvaluatorRight::HomoPointErrorEvaluatorRight( } -bool HomoPointErrorEvaluatorRight::active() const { return pt_->active(); } +bool LateralErrorEvaluatorRight::active() const { return pt_->active(); } -void HomoPointErrorEvaluatorRight::getRelatedVarKeys(KeySet& keys) const { +void LateralErrorEvaluatorRight::getRelatedVarKeys(KeySet& keys) const { 
pt_->getRelatedVarKeys(keys); } -auto HomoPointErrorEvaluatorRight::value() const -> OutType { +auto LateralErrorEvaluatorRight::value() const -> OutType { return D_ * (pt_->value() - meas_pt_); } -auto HomoPointErrorEvaluatorRight::forward() const -> Node::Ptr { +auto LateralErrorEvaluatorRight::forward() const -> Node::Ptr { const auto child = pt_->forward(); const auto value = D_ * (child->value() - meas_pt_); if (value <= 0.0) @@ -39,7 +38,7 @@ auto HomoPointErrorEvaluatorRight::forward() const -> Node::Ptr { return node; } -void HomoPointErrorEvaluatorRight::backward(const Eigen::MatrixXd& lhs, +void LateralErrorEvaluatorRight::backward(const Eigen::MatrixXd& lhs, const Node::Ptr& node, Jacobians& jacs) const { if (pt_->active()) { @@ -49,21 +48,21 @@ void HomoPointErrorEvaluatorRight::backward(const Eigen::MatrixXd& lhs, } } -HomoPointErrorEvaluatorRight::Ptr homo_point_error_right( - const Evaluable::ConstPtr& pt, - const HomoPointErrorEvaluatorRight::InType& meas_pt) { - return HomoPointErrorEvaluatorRight::MakeShared(pt, meas_pt); +LateralErrorEvaluatorRight::Ptr homo_point_error_right( + const Evaluable::ConstPtr& pt, + const LateralErrorEvaluatorRight::InType& meas_pt) { + return LateralErrorEvaluatorRight::MakeShared(pt, meas_pt); } -auto HomoPointErrorEvaluatorLeft::MakeShared(const Evaluable::ConstPtr& pt, +auto LateralErrorEvaluatorLeft::MakeShared(const Evaluable::ConstPtr& pt, const InType& meas_pt) -> Ptr { - return std::make_shared(pt, meas_pt); + return std::make_shared(pt, meas_pt); } -HomoPointErrorEvaluatorLeft::HomoPointErrorEvaluatorLeft( +LateralErrorEvaluatorLeft::LateralErrorEvaluatorLeft( const Evaluable::ConstPtr& pt, const InType& meas_pt) : pt_(pt), meas_pt_(meas_pt) { //D_.block<3, 3>(0, 0) = Eigen::Matrix3d::Identity(); @@ -71,17 +70,17 @@ HomoPointErrorEvaluatorLeft::HomoPointErrorEvaluatorLeft( } -bool HomoPointErrorEvaluatorLeft::active() const { return pt_->active(); } +bool LateralErrorEvaluatorLeft::active() const { return 
pt_->active(); } -void HomoPointErrorEvaluatorLeft::getRelatedVarKeys(KeySet& keys) const { +void LateralErrorEvaluatorLeft::getRelatedVarKeys(KeySet& keys) const { pt_->getRelatedVarKeys(keys); } -auto HomoPointErrorEvaluatorLeft::value() const -> OutType { +auto LateralErrorEvaluatorLeft::value() const -> OutType { return D_ * (meas_pt_ - pt_->value()); } -auto HomoPointErrorEvaluatorLeft::forward() const -> Node::Ptr { +auto LateralErrorEvaluatorLeft::forward() const -> Node::Ptr { const auto child = pt_->forward(); const auto value = D_ * (meas_pt_ - child->value()); const auto node = Node::MakeShared(value); @@ -89,7 +88,7 @@ auto HomoPointErrorEvaluatorLeft::forward() const -> Node::Ptr { return node; } -void HomoPointErrorEvaluatorLeft::backward(const Eigen::MatrixXd& lhs, +void LateralErrorEvaluatorLeft::backward(const Eigen::MatrixXd& lhs, const Node::Ptr& node, Jacobians& jacs) const { if (pt_->active()) { @@ -99,11 +98,10 @@ void HomoPointErrorEvaluatorLeft::backward(const Eigen::MatrixXd& lhs, } } -HomoPointErrorEvaluatorLeft::Ptr homo_point_error_left( - const Evaluable::ConstPtr& pt, - const HomoPointErrorEvaluatorLeft::InType& meas_pt) { - return HomoPointErrorEvaluatorLeft::MakeShared(pt, meas_pt); +LateralErrorEvaluatorLeft::Ptr homo_point_error_left( + const Evaluable::ConstPtr& pt, + const LateralErrorEvaluatorLeft::InType& meas_pt) { + return LateralErrorEvaluatorLeft::MakeShared(pt, meas_pt); } -} // namespace stereo } // namespace steam \ No newline at end of file diff --git a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp index a90a8cf8b..60e0de308 100644 --- a/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp +++ b/main/src/vtr_path_planning/src/mpc/mpc_path_planner2.cpp @@ -17,7 +17,7 @@ * \author Jordy Sehn, Autonomous Space Robotics Lab (ASRL) */ #include "vtr_path_planning/mpc/mpc_path_planner2.hpp" -#include "vtr_path_planning/mpc/custom_steam_evaluators.hpp" 
+#include "vtr_path_planning/mpc/lateral_error_evaluators.hpp" #include "vtr_path_planning/mpc/custom_loss_functions.hpp" #include "vtr_path_planning/mpc/scalar_log_barrier_evaluator.hpp" //#include "vtr_lidar/cache.hpp" // For lidar version of the planner only @@ -262,8 +262,8 @@ struct mpc_result SolveMPC2(Eigen::Matrix previous_vel, lgmath::se //CLOG(INFO, "mpc_debug.cbit") << "The vtr measurement for this value is: " << measurements[i].inverse(); // compute the lateral error using a custom Homogenous point error STEAM evaluator - const auto lat_error_right = steam::stereo::HomoPointErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else - const auto lat_error_left = steam::stereo::HomoPointErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); + const auto lat_error_right = steam::LateralErrorEvaluatorRight::MakeShared(error_vec, barrier_right); // TODO, rename this evaluator to something else + const auto lat_error_left = steam::LateralErrorEvaluatorLeft::MakeShared(error_vec, barrier_left); // Previously used Log barriers, however due to instability switch to using inverse squared barriers //const auto lat_barrier_right = steam::vspace::ScalarLogBarrierEvaluator<1>::MakeShared(lat_error_right);