diff --git a/all_seaing_bringup/launch/shoreside.launch.py b/all_seaing_bringup/launch/shoreside.launch.py
index f9932d08..5f905af6 100644
--- a/all_seaing_bringup/launch/shoreside.launch.py
+++ b/all_seaing_bringup/launch/shoreside.launch.py
@@ -67,7 +67,7 @@ def generate_launch_description():
     return LaunchDescription(
         [
             DeclareLaunchArgument(
-                "launch_rviz", default_value="true", choices=["true", "false"]
+                "launch_rviz", default_value="false", choices=["true", "false"]
             ),
             DeclareLaunchArgument(
                 "use_lora", default_value="false", choices=["true", "false"]
diff --git a/all_seaing_bringup/launch/sim.launch.py b/all_seaing_bringup/launch/sim.launch.py
index 030ff7b1..bdde25af 100644
--- a/all_seaing_bringup/launch/sim.launch.py
+++ b/all_seaing_bringup/launch/sim.launch.py
@@ -113,7 +113,10 @@ def launch_setup(context, *args, **kwargs):
         parameters=[
             {
                 "color_label_mappings_file": color_label_mappings,
-            }
+            },
+            {
+                "is_sim": True,
+            },
         ],
     )
@@ -153,13 +156,9 @@ def launch_setup(context, *args, **kwargs):
             ("point_cloud", "point_cloud/filtered"),
         ],
         parameters=[
-            {"robot_frame_id": "wamv/wamv/base_link"},
             {"obstacle_size_min": 2},
             {"obstacle_size_max": 60},
             {"clustering_distance": 1.0},
-            {"obstacle_seg_thresh": 10.0},
-            {"obstacle_drop_thresh": 1.0},
-            {"polygon_area_thresh": 100000.0},
         ],
     )
diff --git a/all_seaing_bringup/launch/vehicle.launch.py b/all_seaing_bringup/launch/vehicle.launch.py
index 51184378..37f6373c 100644
--- a/all_seaing_bringup/launch/vehicle.launch.py
+++ b/all_seaing_bringup/launch/vehicle.launch.py
@@ -118,8 +118,7 @@ def launch_setup(context, *args, **kwargs):
             ("point_cloud", "/velodyne_points"),
         ],
         parameters=[
-            #{"range_x": [-0.5, 1.5]},
-            #{"range_y": [0.0, 3.0]},
+            {"range_radius": [0.5, 100000.0]},
         ],
     )
@@ -141,12 +140,9 @@ def launch_setup(context, *args, **kwargs):
             ("point_cloud", "point_cloud/filtered"),
         ],
         parameters=[
-            {"obstacle_size_min": 2},
-            {"obstacle_size_max": 1000},
-            {"clustering_distance": 1.0},
-            {"obstacle_seg_thresh": 10.0},
-            {"obstacle_drop_thresh": 1.0},
-            {"polygon_area_thresh": 100000.0},
+            {"obstacle_size_min": 20},
+            {"obstacle_size_max": 800},
+            {"clustering_distance": 0.1},
         ],
     )
diff --git a/all_seaing_perception/all_seaing_perception/obstacle_bbox_visualizer.cpp b/all_seaing_perception/all_seaing_perception/obstacle_bbox_visualizer.cpp
index c1f124a1..39cab040 100644
--- a/all_seaing_perception/all_seaing_perception/obstacle_bbox_visualizer.cpp
+++ b/all_seaing_perception/all_seaing_perception/obstacle_bbox_visualizer.cpp
@@ -1,182 +1,237 @@
-#include
-#include
+#include "rclcpp/rclcpp.hpp"
+
+#include "message_filters/subscriber.h"
+#include "message_filters/sync_policies/approximate_time.h"
+#include "message_filters/synchronizer.h"
-#include "all_seaing_perception/obstacle_bbox_visualizer.hpp"
+#include "all_seaing_interfaces/msg/labeled_bounding_box2_d_array.hpp"
+#include "all_seaing_interfaces/msg/obstacle_map.hpp"
 #include "geometry_msgs/msg/point.hpp"
+#include "geometry_msgs/msg/transform_stamped.hpp"
+#include "image_geometry/pinhole_camera_model.h"
+#include "sensor_msgs/msg/camera_info.hpp"
+#include "sensor_msgs/msg/image.hpp"
+#include "sensor_msgs/msg/point_cloud2.hpp"
+
+#include "tf2/LinearMath/Matrix3x3.h"
+#include "tf2/LinearMath/Quaternion.h"
 #include "tf2_geometry_msgs/tf2_geometry_msgs.hpp"
+#include "tf2_ros/buffer.h"
+#include "tf2_ros/transform_listener.h"
+
 #include "yaml-cpp/yaml.h"
 #include
 #include
-ObstacleBboxVisualizer::ObstacleBboxVisualizer() : Node("obstacle_bbox_visualizer") {
-
-    // initialize transform listener
-    m_tf_buffer = std::make_unique<tf2_ros::Buffer>(this->get_clock());
-    m_tf_listener = std::make_shared<tf2_ros::TransformListener>(*m_tf_buffer);
-
-    // Initialize subscribers
-    m_image_intrinsics_sub = this->create_subscription<sensor_msgs::msg::CameraInfo>(
-        "camera_info", 10,
-        std::bind(&ObstacleBboxVisualizer::intrinsics_cb, this, std::placeholders::_1));
-    m_image_sub.subscribe(this, "image", rmw_qos_profile_sensor_data);
-    m_obstacle_map_sub.subscribe(this, "obstacle_map/labeled", rmw_qos_profile_default);
-    m_bbox_sub.subscribe(this, "bounding_boxes", rmw_qos_profile_default);
-
-    // Initialize publisher
-    m_image_pub = this->create_publisher<sensor_msgs::msg::Image>("image/obstacle_visualized", 10);
-
-    // Initialize synchronizer
-    m_obstacle_bbox_visualizer_sync = std::make_shared<ObstacleBboxVisualizerSync>(
-        ObstacleBboxVisualizerPolicy(10), m_image_sub, m_obstacle_map_sub, m_bbox_sub);
-    m_obstacle_bbox_visualizer_sync->registerCallback(
-        std::bind(&ObstacleBboxVisualizer::image_obstacle_cb, this, std::placeholders::_1,
-                  std::placeholders::_2, std::placeholders::_3));
-
-    this->declare_parameter("color_label_mappings_file", "");
-    color_label_mappings_file = this->get_parameter("color_label_mappings_file").as_string();
-    std::ifstream yamlFile(color_label_mappings_file);
-
-    if (yamlFile.is_open()) {
-        config_yaml = YAML::Load(yamlFile);
-        for (YAML::const_iterator it = config_yaml.begin(); it != config_yaml.end(); ++it) {
-            // RCLCPP_INFO(this->get_logger(), "label %d and color %s", it->second.as<int>(),
-            // it->second.as<std::string>().c_str());
-            label_color_map[it->second.as<int>()] = it->first.as<std::string>();
+#include
+#include
+#include
+
+class ObstacleBboxVisualizer : public rclcpp::Node {
+public:
+    ObstacleBboxVisualizer() : Node("obstacle_bbox_visualizer") {
+        // Initialize parameters
+        this->declare_parameter("is_sim", false);
+        m_is_sim = this->get_parameter("is_sim").as_bool();
+
+        // Initialize transform listener
+        m_tf_buffer = std::make_unique<tf2_ros::Buffer>(this->get_clock());
+        m_tf_listener = std::make_shared<tf2_ros::TransformListener>(*m_tf_buffer);
+
+        // Initialize subscribers
+        m_image_intrinsics_sub = this->create_subscription<sensor_msgs::msg::CameraInfo>(
+            "camera_info", 10,
+            std::bind(&ObstacleBboxVisualizer::intrinsics_cb, this, std::placeholders::_1));
+        m_image_sub.subscribe(this, "image", rmw_qos_profile_sensor_data);
+        m_obstacle_map_sub.subscribe(this, "obstacle_map/labeled", rmw_qos_profile_default);
+        m_bbox_sub.subscribe(this, "bounding_boxes", rmw_qos_profile_default);
+
+        // Initialize publisher
+        m_image_pub =
+            this->create_publisher<sensor_msgs::msg::Image>("image/obstacle_visualized", 10);
+
+        // Initialize synchronizer
+        m_obstacle_bbox_visualizer_sync = std::make_shared<ObstacleBboxVisualizerSync>(
+            ObstacleBboxVisualizerPolicy(10), m_image_sub, m_obstacle_map_sub, m_bbox_sub);
+        m_obstacle_bbox_visualizer_sync->registerCallback(
+            std::bind(&ObstacleBboxVisualizer::image_obstacle_cb, this, std::placeholders::_1,
+                      std::placeholders::_2, std::placeholders::_3));
+
+        this->declare_parameter("color_label_mappings_file", "");
+        m_color_label_mappings_file = this->get_parameter("color_label_mappings_file").as_string();
+        std::ifstream yamlFile(m_color_label_mappings_file);
+
+        if (yamlFile.is_open()) {
+            config_yaml = YAML::Load(yamlFile);
+            for (YAML::const_iterator it = config_yaml.begin(); it != config_yaml.end(); ++it) {
+                m_label_color_map[it->second.as<int>()] = it->first.as<std::string>();
+            }
+        } else {
+            RCLCPP_ERROR(this->get_logger(), "Failed to open YAML file: %s",
+                         m_color_label_mappings_file.c_str());
         }
-    } else {
-        RCLCPP_ERROR(this->get_logger(), "Failed to open YAML file: %s",
-                     color_label_mappings_file.c_str());
-    }
-    yamlFile.close();
-}
-
-ObstacleBboxVisualizer::~ObstacleBboxVisualizer() {}
-void ObstacleBboxVisualizer::image_obstacle_cb(
-    const sensor_msgs::msg::Image::ConstSharedPtr &in_img_msg,
-    const all_seaing_interfaces::msg::ObstacleMap::ConstSharedPtr &in_map_msg,
-    const all_seaing_interfaces::msg::LabeledBoundingBox2DArray::ConstSharedPtr &in_bbox_msg) {
-
-    // Get the transform from LiDAR to camera
-    if (!m_pc_cam_tf_ok) {
-        m_pc_cam_tf = get_tf(in_bbox_msg->header.frame_id, in_map_msg->local_header.frame_id);
+        yamlFile.close();
     }
-    cv_bridge::CvImagePtr cv_ptr;
-    try {
-        cv_ptr = cv_bridge::toCvCopy(in_img_msg, sensor_msgs::image_encodings::BGR8);
-    } catch (cv_bridge::Exception &e) {
-        RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
-        return;
-    }
-
-    for (const auto &obstacle : in_map_msg->obstacles) {
-
-        // transform the lidar point to camera frame
-        geometry_msgs::msg::Point lidar_point;
-        lidar_point.x = obstacle.local_point.point.x;
-        lidar_point.y = obstacle.local_point.point.y;
-        lidar_point.z = obstacle.local_point.point.z;
-        geometry_msgs::msg::Point camera_point;
-        tf2::doTransform(lidar_point, camera_point, m_pc_cam_tf);
+private:
+    void image_obstacle_cb(
+        const sensor_msgs::msg::Image::ConstSharedPtr &in_img_msg,
+        const all_seaing_interfaces::msg::ObstacleMap::ConstSharedPtr &in_map_msg,
+        const all_seaing_interfaces::msg::LabeledBoundingBox2DArray::ConstSharedPtr &in_bbox_msg) {
-    // RCLCPP_INFO(this->get_logger(), "Lidar point: (%f, %f, %f)", lidar_point.x,
-    // lidar_point.y, lidar_point.z); RCLCPP_INFO(this->get_logger(), "Camera point: (%f, %f,
-    // %f)", camera_point.x, camera_point.y, camera_point.z);
+        // Get the transform from LiDAR to camera
+        if (!m_pc_cam_tf_ok) {
+            m_pc_cam_tf = get_tf(in_bbox_msg->header.frame_id, in_map_msg->local_header.frame_id);
+        }
-    // find the centroid and display it.
-    cv::Point3d centroid(camera_point.y, camera_point.z, -camera_point.x);
-    cv::Point2d pixel_centroid = m_cam_model.project3dToPixel(centroid);
+        cv_bridge::CvImagePtr cv_ptr;
+        try {
+            cv_ptr = cv_bridge::toCvCopy(in_img_msg, sensor_msgs::image_encodings::BGR8);
+        } catch (cv_bridge::Exception &e) {
+            RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
+            return;
+        }
-    if (pixel_centroid.x >= 0 && pixel_centroid.x < cv_ptr->image.cols &&
-        pixel_centroid.y >= 0 && pixel_centroid.y < cv_ptr->image.rows) {
-        cv::Scalar color = get_color_for_label(obstacle.label);
+        for (const auto &obstacle : in_map_msg->obstacles) {
+
+            // Transform the lidar point to camera frame
+            geometry_msgs::msg::Point lidar_point;
+            lidar_point.x = obstacle.local_point.point.x;
+            lidar_point.y = obstacle.local_point.point.y;
+            lidar_point.z = obstacle.local_point.point.z;
+            geometry_msgs::msg::Point camera_point;
+            tf2::doTransform(lidar_point, camera_point, m_pc_cam_tf);
+
+            // Find the centroid and display it.
+            cv::Point2d pixel_centroid =
+                m_is_sim ? m_cam_model.project3dToPixel(
+                               cv::Point3d(camera_point.y, camera_point.z, -camera_point.x))
+                         : m_cam_model.project3dToPixel(
+                               cv::Point3d(camera_point.x, camera_point.y, camera_point.z));
+            if (pixel_centroid.x >= 0 && pixel_centroid.x < cv_ptr->image.cols &&
+                pixel_centroid.y >= 0 && pixel_centroid.y < cv_ptr->image.rows) {
+                cv::Scalar color = get_color_for_label(obstacle.label);
+
+                cv::circle(cv_ptr->image, pixel_centroid, 5, color, -1);
+            } else {
+                RCLCPP_WARN(this->get_logger(), "Centroid outside image bounds: (%f, %f)",
+                            pixel_centroid.x, pixel_centroid.y);
+            }
+
+            // Display bbox -> 2d bbox. This is the 2d bbox of the lidar point.
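+            // As with the centroid projection above, the sim (Gazebo) camera frame uses a
+            // different axis convention, so the corner points swap axes when is_sim is set.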
+            geometry_msgs::msg::Point camera_bbox_min;
+            geometry_msgs::msg::Point camera_bbox_max;
+            tf2::doTransform(obstacle.bbox_min, camera_bbox_min,
+                             m_pc_cam_tf);
+            tf2::doTransform(obstacle.bbox_max, camera_bbox_max,
+                             m_pc_cam_tf);
+            cv::Point pt1 =
+                m_is_sim ? m_cam_model.project3dToPixel(
+                               cv::Point3d(camera_bbox_min.y, camera_bbox_min.z, -camera_bbox_min.x))
+                         : m_cam_model.project3dToPixel(
+                               cv::Point3d(camera_bbox_min.x, camera_bbox_min.y, camera_bbox_min.z));
+            cv::Point pt2 =
+                m_is_sim ? m_cam_model.project3dToPixel(
+                               cv::Point3d(camera_bbox_max.y, camera_bbox_max.z, -camera_bbox_max.x))
+                         : m_cam_model.project3dToPixel(
+                               cv::Point3d(camera_bbox_max.x, camera_bbox_max.y, camera_bbox_max.z));
+            cv::Scalar color(0, 255, 255);
+            cv::rectangle(cv_ptr->image, pt1, pt2, color, 2);
+
+            // Define the position for the label text, slightly above the bounding box
+            cv::Point label_position(pt1.x, pt1.y - 5); // Adjust to position above the rectangle
+
+            // Add label text
+            std::string label_text =
+                std::to_string(obstacle.label); // Replace with whatever string you want
+            cv::putText(cv_ptr->image, label_text, label_position, cv::FONT_HERSHEY_SIMPLEX, 0.5,
+                        color, 1);
+        }
-        cv::circle(cv_ptr->image, pixel_centroid, 5, color, -1);
-    } else {
-        RCLCPP_WARN(this->get_logger(), "Centroid outside image bounds: (%f, %f)",
-                    pixel_centroid.x, pixel_centroid.y);
+        // Draw the bounding boxes
+        for (const auto &bbox : in_bbox_msg->boxes) {
+            cv::Scalar color = get_color_for_label(bbox.label);
+            cv::Point pt1(bbox.min_x, bbox.min_y);
+            cv::Point pt2(bbox.max_x, bbox.max_y);
+            cv::rectangle(cv_ptr->image, pt1, pt2, color, 2);
         }
-    // display bbox -> 2d bbox. this is the 2d bbox of the lidar point.
-    geometry_msgs::msg::Point camera_bbox_min;
-    geometry_msgs::msg::Point camera_bbox_max;
-    tf2::doTransform(obstacle.bbox_min, camera_bbox_min,
-                     m_pc_cam_tf);
-    tf2::doTransform(obstacle.bbox_max, camera_bbox_max,
-                     m_pc_cam_tf);
-    cv::Point3d ptmin(camera_bbox_min.y, camera_bbox_min.z, -camera_bbox_min.x);
-    cv::Point3d ptmax(camera_bbox_max.y, camera_bbox_max.z, -camera_bbox_max.x);
-    cv::Point pt1 = m_cam_model.project3dToPixel(ptmin);
-    cv::Point pt2 = m_cam_model.project3dToPixel(ptmax);
-    cv::Scalar color(0, 255, 255);
-    cv::rectangle(cv_ptr->image, pt1, pt2, color, 2);
-
-    // Define the position for the label text, slightly above the bounding box
-    cv::Point label_position(pt1.x, pt1.y - 5); // Adjust to position above the rectangle
-
-    // Add label text
-    std::string label_text =
-        std::to_string(obstacle.label); // Replace with whatever string you want
-    cv::putText(cv_ptr->image, label_text, label_position, cv::FONT_HERSHEY_SIMPLEX, 0.5, color,
-                1);
-    // RCLCPP_INFO(this->get_logger(), "visualizer Bbox min: (%d, %d)", pt1.x, pt1.y);
-    // RCLCPP_INFO(this->get_logger(), "visualizer Bbox max: (%d, %d)", pt2.x, pt2.y);
+        m_image_pub->publish(*cv_ptr->toImageMsg());
     }
-    // draw the bounding boxes
-    for (const auto &bbox : in_bbox_msg->boxes) {
-        cv::Scalar color = get_color_for_label(bbox.label);
-        cv::Point pt1(bbox.min_x, bbox.min_y);
-        cv::Point pt2(bbox.max_x, bbox.max_y);
-        cv::rectangle(cv_ptr->image, pt1, pt2, color, 2);
+    void intrinsics_cb(const sensor_msgs::msg::CameraInfo::ConstSharedPtr &info_msg) {
+        m_cam_model.fromCameraInfo(info_msg);
     }
-    m_image_pub->publish(*cv_ptr->toImageMsg());
-}
-
-void ObstacleBboxVisualizer::intrinsics_cb(
-    const sensor_msgs::msg::CameraInfo::ConstSharedPtr &info_msg) {
-    m_cam_model.fromCameraInfo(info_msg);
-}
+
+    geometry_msgs::msg::TransformStamped get_tf(const std::string &in_target_frame,
+                                                const std::string &in_src_frame) {
+        geometry_msgs::msg::TransformStamped tf;
+        m_pc_cam_tf_ok = false;
+        try {
+            tf = m_tf_buffer->lookupTransform(in_target_frame, in_src_frame, tf2::TimePointZero);
+            m_pc_cam_tf_ok = true;
+            RCLCPP_INFO(this->get_logger(), "LiDAR to Camera Transform good in visualizer");
+            RCLCPP_INFO(this->get_logger(), "in_target_frame: %s, in_src_frame: %s",
+                        in_target_frame.c_str(), in_src_frame.c_str());
+        } catch (tf2::TransformException &ex) {
+            RCLCPP_ERROR(this->get_logger(), "Error in LiDAR to Camera TF in visualizer: %s",
+                         ex.what());
+        }
+        return tf;
+    }
-geometry_msgs::msg::TransformStamped
-ObstacleBboxVisualizer::get_tf(const std::string &in_target_frame,
-                               const std::string &in_src_frame) {
-    geometry_msgs::msg::TransformStamped tf;
-    m_pc_cam_tf_ok = false;
-    try {
-        tf = m_tf_buffer->lookupTransform(in_target_frame, in_src_frame, tf2::TimePointZero);
-        m_pc_cam_tf_ok = true;
-        RCLCPP_INFO(this->get_logger(), "LiDAR to Camera Transform good in visualizer");
-        RCLCPP_INFO(this->get_logger(), "in_target_frame: %s, in_src_frame: %s",
-                    in_target_frame.c_str(), in_src_frame.c_str());
-    } catch (tf2::TransformException &ex) {
-        RCLCPP_ERROR(this->get_logger(), "Error in LiDAR to Camera TF in visualizer: %s",
-                     ex.what());
+    cv::Scalar get_color_for_label(const int &label) {
+        std::string color = m_label_color_map[label];
+
+        if (color == "orange")
+            return cv::Scalar(0, 165, 255);
+        if (color == "red")
+            return cv::Scalar(0, 0, 255);
+        if (color == "green")
+            return cv::Scalar(0, 255, 0);
+        if (color == "black")
+            return cv::Scalar(0, 0, 0);
+        if (color == "white")
+            return cv::Scalar(255, 255, 255);
+        return cv::Scalar(255, 0, 0); // blue
    }
-    return tf;
-}
-cv::Scalar ObstacleBboxVisualizer::get_color_for_label(const int &label) {
-    std::string color = label_color_map[label];
-
-    if (color == "orange")
-        return cv::Scalar(0, 165, 255);
-    if (color == "red")
-        return cv::Scalar(0, 0, 255);
-    if (color == "green")
-        return cv::Scalar(0, 255, 0);
-    if (color == "black")
-        return cv::Scalar(0, 0, 0);
-    if (color == "white")
-        return cv::Scalar(255, 255, 255);
-    return cv::Scalar(255, 0, 0); // blue
-}
+    // Publishers and subscribers
+    rclcpp::Publisher<sensor_msgs::msg::Image>::SharedPtr m_image_pub;
+    rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr m_image_intrinsics_sub;
+    message_filters::Subscriber<sensor_msgs::msg::Image> m_image_sub;
+    message_filters::Subscriber<all_seaing_interfaces::msg::ObstacleMap> m_obstacle_map_sub;
+    message_filters::Subscriber<all_seaing_interfaces::msg::LabeledBoundingBox2DArray> m_bbox_sub;
+
+    // Transform variables
+    std::shared_ptr<tf2_ros::TransformListener> m_tf_listener{nullptr};
+    std::unique_ptr<tf2_ros::Buffer> m_tf_buffer;
+    geometry_msgs::msg::TransformStamped m_pc_cam_tf;
+    bool m_pc_cam_tf_ok;
+    std::string m_color_label_mappings_file;
+    YAML::Node config_yaml;
+    std::map<int, std::string> m_label_color_map;
+
+    // Camera model
+    image_geometry::PinholeCameraModel m_cam_model;
+
+    // Check if running in sim
+    bool m_is_sim;
+
+    // Synchronizer
+    typedef message_filters::sync_policies::ApproximateTime<
+        sensor_msgs::msg::Image,
+        all_seaing_interfaces::msg::ObstacleMap,
+        all_seaing_interfaces::msg::LabeledBoundingBox2DArray>
+        ObstacleBboxVisualizerPolicy;
+    typedef message_filters::Synchronizer<ObstacleBboxVisualizerPolicy> ObstacleBboxVisualizerSync;
+    std::shared_ptr<ObstacleBboxVisualizerSync> m_obstacle_bbox_visualizer_sync;
+};

 int main(int argc, char *argv[]) {
     rclcpp::init(argc, argv);
     rclcpp::spin(std::make_shared<ObstacleBboxVisualizer>());
     rclcpp::shutdown();
     return 0;
-}
\ No newline at end of file
+}
diff --git a/all_seaing_perception/all_seaing_perception/obstacle_detector.cpp b/all_seaing_perception/all_seaing_perception/obstacle_detector.cpp
index eaf47acc..ed9b68e6 100644
--- a/all_seaing_perception/all_seaing_perception/obstacle_detector.cpp
+++ b/all_seaing_perception/all_seaing_perception/obstacle_detector.cpp
@@ -27,18 +27,12 @@ class ObstacleDetector : public rclcpp::Node {
         this->declare_parameter("obstacle_size_min", 20);
         this->declare_parameter("obstacle_size_max", 100000);
         this->declare_parameter("clustering_distance", 0.75);
-        this->declare_parameter("obstacle_seg_thresh", 1.0);
-        this->declare_parameter("obstacle_drop_thresh", 1.0);
-        this->declare_parameter("polygon_area_thresh", 100000.0);

         // Initialize member variables from parameters
         m_global_frame_id = this->get_parameter("global_frame_id").as_string();
         m_obstacle_size_min = this->get_parameter("obstacle_size_min").as_int();
         m_obstacle_size_max = this->get_parameter("obstacle_size_max").as_int();
         m_clustering_distance = this->get_parameter("clustering_distance").as_double();
-        m_obstacle_seg_thresh = this->get_parameter("obstacle_seg_thresh").as_double();
-        m_obstacle_drop_thresh = this->get_parameter("obstacle_drop_thresh").as_double();
-        m_polygon_area_thresh = this->get_parameter("polygon_area_thresh").as_double();

         // Initialize tf_listener pointer
         m_tf_buffer = std::make_unique<tf2_ros::Buffer>(this->get_clock());
@@ -139,9 +133,6 @@ class ObstacleDetector : public rclcpp::Node {
     int m_obstacle_size_min;
     int m_obstacle_size_max;
     double m_clustering_distance;
-    double m_obstacle_seg_thresh;
-    double m_obstacle_drop_thresh;
-    double m_polygon_area_thresh;

     // Transform variables
     std::shared_ptr<tf2_ros::TransformListener> m_tf_listener{nullptr};
diff --git a/all_seaing_perception/all_seaing_perception/point_cloud_filter.cpp b/all_seaing_perception/all_seaing_perception/point_cloud_filter.cpp
index 05d4e297..5277dcba 100644
--- a/all_seaing_perception/all_seaing_perception/point_cloud_filter.cpp
+++ b/all_seaing_perception/all_seaing_perception/point_cloud_filter.cpp
@@ -25,6 +25,7 @@ class PointCloudFilter : public rclcpp::Node {
         this->declare_parameter<std::vector<double>>("range_intensity", {0.0, 100000.0});
         this->declare_parameter<std::vector<double>>("range_x", {-100000.0, 100000.0});
         this->declare_parameter<std::vector<double>>("range_y", {-100000.0, 100000.0});
+        this->declare_parameter<std::vector<double>>("range_z", {-100000.0, 100000.0});
         this->declare_parameter("leaf_size", 0.0);

         // Initialize tf_listener pointer
@@ -46,6 +47,7 @@ class PointCloudFilter : public rclcpp::Node {
         m_range_intensity = this->get_parameter("range_intensity").as_double_array();
         m_range_x = this->get_parameter("range_x").as_double_array();
         m_range_y = this->get_parameter("range_y").as_double_array();
+        m_range_z = this->get_parameter("range_z").as_double_array();
         m_leaf_size = this->get_parameter("leaf_size").as_double();
     }

@@ -73,7 +75,7 @@ class PointCloudFilter : public rclcpp::Node {
                       pcl::PointCloud<pcl::PointXYZI>::Ptr &out_cloud_ptr) {
         // Keep point if in radius thresholds, intensity threshold, and is finite
         for (const auto &point : in_cloud_ptr->points) {
-            double radius = sqrt(point.x * point.x + point.y * point.y + point.z * point.z);
+            double radius = sqrt(point.x * point.x + point.y * point.y);
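+            // Note: the radius is now the horizontal (XY) distance only; the z extent
+            // is bounded separately by the new range_z parameter below.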

             // Convert point to map
             geometry_msgs::msg::Point point_msg;
@@ -85,6 +87,7 @@ class PointCloudFilter : public rclcpp::Node {

             if (m_range_x[0] <= point_tf.x && point_tf.x <= m_range_x[1] &&
                 m_range_y[0] <= point_tf.y && point_tf.y <= m_range_y[1] &&
+                m_range_z[0] <= point_tf.z && point_tf.z <= m_range_z[1] &&
                 m_range_radius[0] <= radius && radius <= m_range_radius[1] &&
                 m_range_intensity[0] <= point.intensity && point.intensity <= m_range_intensity[1] &&
@@ -136,6 +139,7 @@ class PointCloudFilter : public rclcpp::Node {
     std::vector<double> m_range_intensity;
     std::vector<double> m_range_x;
     std::vector<double> m_range_y;
+    std::vector<double> m_range_z;
     double m_leaf_size;
 };
diff --git a/all_seaing_perception/all_seaing_perception/point_cloud_image_overlay.cpp b/all_seaing_perception/all_seaing_perception/point_cloud_image_overlay.cpp
index 20d283ce..f422be15 100644
--- a/all_seaing_perception/all_seaing_perception/point_cloud_image_overlay.cpp
+++ b/all_seaing_perception/all_seaing_perception/point_cloud_image_overlay.cpp
@@ -14,7 +14,8 @@ void PclImageOverlay::pc_image_fusion_cb(
     const sensor_msgs::msg::PointCloud2::ConstSharedPtr &in_cloud_msg) {

     // Only continue if intrinsics has been initialized
-    if (!m_cam_model.initialized()) return;
+    if (!m_cam_model.initialized())
+        return;

     // Get transform the first iteration
     if (!m_pc_cam_tf.has_value())
@@ -31,33 +32,36 @@ void PclImageOverlay::pc_image_fusion_cb(

     // Transform in_cloud_msg and convert PointCloud2 to PCL PointCloud
     sensor_msgs::msg::PointCloud2 in_cloud_tf;
-    tf2::doTransform(*in_cloud_msg, in_cloud_tf, m_pc_cam_tf.value());
+    tf2::doTransform(*in_cloud_msg, in_cloud_tf,
+                     m_pc_cam_tf.value());
     pcl::PointCloud<pcl::PointXYZI>::Ptr in_cloud_tf_ptr(new pcl::PointCloud<pcl::PointXYZI>);
     pcl::fromROSMsg(in_cloud_tf, *in_cloud_tf_ptr);

     for (pcl::PointXYZI &point_tf : in_cloud_tf_ptr->points) {
         // Ignore points which are NaN
-        if (isnan(point_tf.x) || isnan(point_tf.y) || isnan(point_tf.z)) continue;
+        if (isnan(point_tf.x) || isnan(point_tf.y) || isnan(point_tf.z))
+            continue;

         // Project 3D point onto the image plane using the intrinsic matrix.
         // Gazebo has a different coordinate system, so the y, z, and x coordinates are modified.
-        cv::Point2d xy_rect = m_is_sim ?
-            m_cam_model.project3dToPixel(cv::Point3d(point_tf.y, point_tf.z, -point_tf.x)) :
-            m_cam_model.project3dToPixel(cv::Point3d(point_tf.x, point_tf.y, point_tf.z));
+        cv::Point2d xy_rect =
+            m_is_sim
+                ? m_cam_model.project3dToPixel(cv::Point3d(point_tf.y, point_tf.z, -point_tf.x))
+                : m_cam_model.project3dToPixel(cv::Point3d(point_tf.x, point_tf.y, point_tf.z));

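+        // "In front of the boat" is +x in the sim (Gazebo) frame but +z in the real
+        // camera optical frame, hence the m_is_sim check in the bounds test below.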
         // Plot projected point onto image if within bounds and in front of the boat
-        if ((xy_rect.x >= 0) && (xy_rect.x < m_cam_model.cameraInfo().width) &&
-            (xy_rect.y >= 0) && (xy_rect.y < m_cam_model.cameraInfo().height) &&
+        if ((xy_rect.x >= 0) && (xy_rect.x < m_cam_model.cameraInfo().width) && (xy_rect.y >= 0) &&
+            (xy_rect.y < m_cam_model.cameraInfo().height) &&
             (m_is_sim ? point_tf.x >= 0 : point_tf.z >= 0)) {
             cv::circle(cv_ptr->image, cv::Point(xy_rect.x, xy_rect.y), 2, cv::Scalar(255, 0, 0), 4);
         }
-
+
         // Debug statements
-        RCLCPP_DEBUG(this->get_logger(), "3D point in camera frame: (%.2f, %.2f, %.2f)",
-                     point_tf.x, point_tf.y, point_tf.z);
-        RCLCPP_DEBUG(this->get_logger(), "2D point in pixel coordinates: (%.2f, %.2f)",
-                     xy_rect.x, xy_rect.y);
+        RCLCPP_DEBUG(this->get_logger(), "3D point in camera frame: (%.2f, %.2f, %.2f)", point_tf.x,
+                     point_tf.y, point_tf.z);
+        RCLCPP_DEBUG(this->get_logger(), "2D point in pixel coordinates: (%.2f, %.2f)", xy_rect.x,
+                     xy_rect.y);
     }
     m_image_pub->publish(*cv_ptr->toImageMsg());
 }
diff --git a/all_seaing_perception/all_seaing_perception/yolov8_node.py b/all_seaing_perception/all_seaing_perception/yolov8_node.py
index 981c7cd8..e4c5e895 100755
--- a/all_seaing_perception/all_seaing_perception/yolov8_node.py
+++ b/all_seaing_perception/all_seaing_perception/yolov8_node.py
@@ -79,7 +79,6 @@ def __init__(self) -> None:
             self.model = YOLO(pt_path)
         else:
             self.get_logger().error(f"Both model paths do not exist :( TensorRT: {engine_path} and pt: {pt_path}")
-
         # Setup QoS profile
         image_qos_profile = QoSProfile(
             reliability=QoSReliabilityPolicy.BEST_EFFORT,
diff --git a/all_seaing_perception/all_seaing_perception/yolov8_train.ipynb b/all_seaing_perception/all_seaing_perception/yolov8_train_buoys.ipynb
similarity index 100%
rename from all_seaing_perception/all_seaing_perception/yolov8_train.ipynb
rename to all_seaing_perception/all_seaing_perception/yolov8_train_buoys.ipynb
diff --git a/all_seaing_perception/all_seaing_perception/yolov8_train_shapes.ipynb b/all_seaing_perception/all_seaing_perception/yolov8_train_shapes.ipynb
new file mode 100644
index 00000000..1630351a
--- /dev/null
+++ b/all_seaing_perception/all_seaing_perception/yolov8_train_shapes.ipynb
@@ -0,0 +1,520 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'\nTrain the model using Yolov8 and Roboflow\n\nReference: https://blog.roboflow.com/how-to-train-yolov8-on-a-custom-dataset/\n'"
+      ]
+     },
+     "execution_count": 1,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "\"\"\"\n",
+    "Train the model using Yolov8 and Roboflow\n",
+    "\n",
+    "Reference: https://blog.roboflow.com/how-to-train-yolov8-on-a-custom-dataset/\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "nvcc: NVIDIA (R) Cuda compiler driver\n",
+      "Copyright (c) 2005-2025 NVIDIA Corporation\n",
+      "Built on Wed_Jan_15_19:20:09_PST_2025\n",
+      "Cuda compilation tools, release 12.8, V12.8.61\n",
+      "Build cuda_12.8.r12.8/compiler.35404655_0\n"
+     ]
+    }
+   ],
+   "source": [
+    "!(nvcc --version) # Check if cuda is installed"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Thu Jan 30 02:13:40 2025       \n",
+      "+-----------------------------------------------------------------------------------------+\n",
+      "| NVIDIA-SMI 560.35.05              Driver Version: 560.35.05      CUDA Version: 12.6     |\n",
+      "|-----------------------------------------+------------------------+----------------------+\n",
+      "| GPU  Name                 Persistence-M | Bus-Id          Disp.A | Volatile Uncorr. ECC |\n",
+      "| Fan  Temp   Perf          Pwr:Usage/Cap |           Memory-Usage | GPU-Util  Compute M. |\n",
+      "|                                         |                        |               MIG M. |\n",
+      "|=========================================+========================+======================|\n",
+      "|   0  NVIDIA GeForce RTX 3060 ...    Off |   00000000:01:00.0 Off |                  N/A |\n",
+      "| N/A   51C    P0             25W /  80W  |      15MiB /  6144MiB  |      0%      Default |\n",
+      "|                                         |                        |                  N/A |\n",
+      "+-----------------------------------------+------------------------+----------------------+\n",
+      "                                                                                           \n",
+      "+-----------------------------------------------------------------------------------------+\n",
+      "| Processes:                                                                              |\n",
+      "|  GPU   GI   CI        PID   Type   Process name                             GPU Memory |\n",
+      "|        ID   ID                                                              Usage      |\n",
+      "|=========================================================================================|\n",
+      "|    0   N/A  N/A      1591      G   /usr/lib/xorg/Xorg                             4MiB |\n",
+      "+-----------------------------------------------------------------------------------------+\n"
+     ]
+    }
+   ],
+   "source": [
+    "!(nvidia-smi) # Check if cuda driver is working"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Ultralytics YOLOv8.1.29 🚀 Python-3.10.12 torch-2.2.0+cu121 CUDA:0 (NVIDIA GeForce RTX 3060 Laptop GPU, 5834MiB)\n",
+      "Setup complete ✅ (16 CPUs, 14.9 GB RAM, 81.7/97.5 GB disk)\n",
+      "GPU is being used: True\n"
+     ]
+    }
+   ],
+   "source": [
+    "# if not installed run: pip install ultralytics\n",
+    "import ultralytics\n",
+    "from torch import cuda\n",
+    "ultralytics.checks()\n",
+    "print(\"GPU is being used: \", cuda.is_available())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "loading Roboflow workspace...\n",
+      "loading Roboflow project...\n",
+      "Dependency ultralytics==8.0.196 is required but found version=8.1.29, to fix: `pip install ultralytics==8.0.196`\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Downloading Dataset Version Zip in Shape-Data-8 to yolov8:: 100%|██████████| 1008907/1008907 [03:52<00:00, 4338.57it/s]"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "Extracting Dataset Version Zip to Shape-Data-8 in yolov8:: 100%|██████████| 30334/30334 [00:03<00:00, 8164.51it/s] \n"
+     ]
+    }
+   ],
+   "source": [
+    "# if not installed run: pip install roboflow\n",
+    "# if you have a weird 'appengine' error run this: pip install urllib3==2.0.7\n",
+    "# other errors, run through chat gpt\n",
+    "# this was copied from exporting the dataset on roboflow with 'yolov8'\n",
+    "\n",
+    "\"\"\"\n",
+    "Reference to Shape Dataset: https://universe.roboflow.com/arcturus-project/shape-data\n",
+    "\"\"\"\n",
+    "\n",
+    "from roboflow import Roboflow\n",
+    "rf = Roboflow(api_key=\"O3voW9YlufPmeJibf4Jm\") # Private Key for Team\n",
+    "project = rf.workspace(\"arcturus-project\").project(\"shape-data\")\n",
+    "version = project.version(8)\n",
+    "dataset = version.download(\"yolov8\")\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Run the training of the model with the correct location to the data.yaml and custom settings (could take a while)\n",
+    "# This assumes data folder is in perception folder and this document is in perception folder\n",
+    "data_folder = ''\n",
+    "\n",
+    "imgsz = 640\n",
+    "\n",
+    "model= 'yolov8s.pt' \n",
+    "\n",
+    "# ! symbol lets you run command line interface commands (terminal commands) \n",
+    "# Run this in command line if you get errors\n",
+    "# replace data.location with the name of the file folder\n",
+    "# WARNING: This will take a while and consume a lot of computer resources\n",
+    "!yolo task=detect \\\n",
+    "  mode=train \\\n",
+    "  model=yolov8s.pt \\\n",
+    "  data={data.location}/data.yaml \\\n",
+    "  epochs=25 \\\n",
+    "  imgsz=640 \\\n",
+    "  plots=True\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# validation of training results\n",
+    "\n",
+    "!yolo task=detect \\\n",
+    "  mode=val \\\n",
+    "  model={HOME}/runs/detect/train/weights/best.pt \\\n",
+    "  data={dataset.location}/data.yaml"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# predict using model\n",
+    "\n",
+    "!yolo task=detect \\\n",
+    "  mode=predict \\\n",
+    "  model={HOME}/runs/detect/train/weights/best.pt \\\n",
+    "  conf=0.25 \\\n",
+    "  source={dataset.location}/test/images"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "0: 480x640 1 red-buoy, 1 yellow-buoy, 1 yellow-duck, 26.3ms\n",
+      "1: 480x640 2 green-buoys, 1 red-buoy, 1 yellow-buoy, 26.3ms\n",
+      "2: 480x640 2 green-buoys, 1 red-buoy, 1 yellow-buoy, 1 yellow-duck, 26.3ms\n",
+      "3: 480x640 3 green-buoys, 1 red-buoy, 1 yellow-buoy, 26.3ms\n",
+      "Speed: 3.5ms preprocess, 26.3ms inference, 0.6ms postprocess per image at shape (1, 3, 480, 640)\n",
+      "res = ultralytics.engine.results.Results object with attributes:\n",
+      "\n",
+      "boxes: ultralytics.engine.results.Boxes object\n",
+      "keypoints: None\n",
+      "masks: None\n",
+      "names: {0: 'black-buoy', 1: 'blue-buoy', 2: 'blue-cross', 3: 'green-buoy', 4: 'green-column-buoy', 5: 'red-buoy', 6: 'red-column-buoy', 7: 'red-dot', 8: 'yellow-buoy', 9: 'yellow-duck'}\n",
+      "obb: None\n",
+      "orig_img: array([[[ 35,   4, 213],\n",
+      "        [ 34,   3, 212],\n",
+      "        [ 37,   6, 215],\n",
+      "        ...,\n",
+      "        [ 62,  81, 102],\n",
+      "        [ 42,  61,  82],\n",
+      "        [ 60,  79, 100]],\n",
+      "\n",
+      "       [[ 38,   7, 216],\n",
+      "        [ 36,   5, 214],\n",
+      "        [ 38,   7, 216],\n",
+      "        ...,\n",
+      "        [ 64,  83, 104],\n",
+      "        [ 60,  79, 100],\n",
+      "        [ 55,  74,  95]],\n",
+      "\n",
+      "       [[ 39,   8, 217],\n",
+      "        [ 44,  13, 222],\n",
+      "        [ 39,   8, 217],\n",
+      "        ...,\n",
+      "        [ 64,  83, 104],\n",
+      "        [ 59,  78,  99],\n",
+      "        [ 57,  76,  97]],\n",
+      "\n",
+      "       ...,\n",
+      "\n",
+      "       [[150, 165, 137],\n",
+      "        [148, 163, 135],\n",
+      "        [147, 162, 134],\n",
+      "        ...,\n",
+      "        [125, 140, 109],\n",
+      "        [120, 135, 104],\n",
+      "        [129, 144, 113]],\n",
+      "\n",
+      "       [[147, 162, 134],\n",
+      "        [147, 162, 134],\n",
+      "        [145, 160, 132],\n",
+      "        ...,\n",
+      "        [133, 148, 117],\n",
+      "        [123, 138, 107],\n",
+      "        [118, 133, 102]],\n",
+      "\n",
+      "       [[150, 165, 137],\n",
+      "        [148, 163, 135],\n",
+      "        [147, 162, 134],\n",
+      "        ...,\n",
+      "        [134, 149, 118],\n",
+      "        [134, 149, 118],\n",
+      "        [131, 146, 115]]], dtype=uint8)\n",
+      "orig_shape: (3000, 4000)\n",
+      "path: 'arcturus_test_image_1.jpg'\n",
+      "probs: None\n",
+      "save_dir: 'runs/detect/predict'\n",
+      "speed: {'preprocess': 3.4845471382141113, 'inference': 26.294291019439697, 'postprocess': 0.5663633346557617}\n",
+      "res = ultralytics.engine.results.Results object with attributes:\n",
+      "\n",
+      "boxes: ultralytics.engine.results.Boxes object\n",
+      "keypoints: None\n",
+      "masks: None\n",
+      "names: {0: 'black-buoy', 1: 'blue-buoy', 2: 'blue-cross', 3: 'green-buoy', 4: 'green-column-buoy', 5: 'red-buoy', 6: 'red-column-buoy', 7: 'red-dot', 8: 'yellow-buoy', 9: 'yellow-duck'}\n",
+      "obb: None\n",
+      "orig_img: array([[[187, 188, 184],\n",
+      "        [188, 189, 185],\n",
+      "        [188, 189, 185],\n",
+      "        ...,\n",
+      "        [178, 179, 177],\n",
+      "        [177, 178, 176],\n",
+      "        [179, 180, 178]],\n",
+      "\n",
+      "       [[187, 188, 184],\n",
+      "        [188, 189, 185],\n",
+      "        [189, 190, 186],\n",
+      "        ...,\n",
+      "        [176, 177, 175],\n",
+      "        [177, 178, 176],\n",
+      "        [179, 180, 178]],\n",
+      "\n",
+      "       [[188, 189, 185],\n",
+      "        [190, 191, 187],\n",
+      "        [191, 192, 188],\n",
+      "        ...,\n",
+      "        [173, 174, 172],\n",
+      "        [176, 177, 175],\n",
+      "        [178, 179, 177]],\n",
+      "\n",
+      "       ...,\n",
+      "\n",
+      "       [[182, 193, 161],\n",
+      "        [180, 191, 159],\n",
+      "        [179, 190, 158],\n",
+      "        ...,\n",
+      "        [182, 191, 165],\n",
+      "        [183, 192, 166],\n",
+      "        [183, 192, 166]],\n",
+      "\n",
+      "       [[182, 193, 161],\n",
+      "        [181, 192, 160],\n",
+      "        [181, 192, 160],\n",
+      "        ...,\n",
+      "        [183, 192, 166],\n",
+      "        [184, 193, 167],\n",
+      "        [183, 192, 166]],\n",
+      "\n",
+      "       [[180, 191, 159],\n",
+      "        [181, 192, 160],\n",
+      "        [181, 192, 160],\n",
+      "        ...,\n",
+      "        [183, 192, 166],\n",
+      "        [184, 193, 167],\n",
+      "        [182, 191, 165]]], dtype=uint8)\n",
+      "orig_shape: (3000, 4000)\n",
+      "path: 'arcturus_test_image_2.jpg'\n",
+      "probs: None\n",
+      "save_dir: 'runs/detect/predict'\n",
+      "speed: {'preprocess': 3.4845471382141113, 'inference': 26.294291019439697, 'postprocess': 0.5663633346557617}\n",
+      "res = ultralytics.engine.results.Results object with attributes:\n",
+      "\n",
+      "boxes: ultralytics.engine.results.Boxes object\n",
+      "keypoints: None\n",
+      "masks: None\n",
+      "names: {0: 'black-buoy', 1: 'blue-buoy', 2: 'blue-cross', 3: 'green-buoy', 4: 'green-column-buoy', 5: 'red-buoy', 6: 'red-column-buoy', 7: 'red-dot', 8: 'yellow-buoy', 9: 'yellow-duck'}\n",
+      "obb: None\n",
+      "orig_img: array([[[178, 179, 175],\n",
+      "        [179, 180, 176],\n",
+      "        [180, 181, 177],\n",
+      "        ...,\n",
+      "        [161, 164, 162],\n",
+      "        [160, 163, 161],\n",
+      "        [159, 162, 160]],\n",
+      "\n",
+      "       [[178, 179, 175],\n",
+      "        [179, 180, 176],\n",
+      "        [180, 181, 177],\n",
+      "        ...,\n",
+      "        [159, 162, 160],\n",
+      "        [159, 162, 160],\n",
+      "        [160, 163, 161]],\n",
+      "\n",
+      "       [[178, 179, 175],\n",
+      "        [179, 180, 176],\n",
+      "        [180, 181, 177],\n",
+      "        ...,\n",
+      "        [156, 159, 157],\n",
+      "        [157, 160, 158],\n",
+      "        [160, 163, 161]],\n",
+      "\n",
+      "       ...,\n",
+      "\n",
+      "       [[168, 178, 148],\n",
+      "        [168, 178, 148],\n",
+      "        [168, 178, 148],\n",
+      "        ...,\n",
+      "        [168, 177, 151],\n",
+      "        [170, 179, 153],\n",
+      "        [176, 185, 159]],\n",
+      "\n",
+      "       [[171, 181, 151],\n",
+      "        [171, 181, 151],\n",
+      "        [170, 180, 150],\n",
+      "        ...,\n",
+      "        [171, 180, 154],\n",
+      "        [169, 178, 152],\n",
+      "        [172, 181, 155]],\n",
+      "\n",
+      "       [[173, 183, 153],\n",
+      "        [173, 183, 153],\n",
+      "        [173, 183, 153],\n",
+      "        ...,\n",
+      "        [175, 184, 158],\n",
+      "        [172, 181, 155],\n",
+      "        [172, 181, 155]]], dtype=uint8)\n",
+      "orig_shape: (3000, 4000)\n",
+      "path: 'arcturus_test_image_3.jpg'\n",
+      "probs: None\n",
+      "save_dir: 'runs/detect/predict'\n",
+      "speed: {'preprocess': 3.4845471382141113, 'inference': 26.294291019439697, 'postprocess': 0.5663633346557617}\n",
+      "res = ultralytics.engine.results.Results object with attributes:\n",
+      "\n",
+      "boxes: ultralytics.engine.results.Boxes object\n",
+      "keypoints: None\n",
+      "masks: None\n",
+      "names: {0: 'black-buoy', 1: 'blue-buoy', 2: 'blue-cross', 3: 'green-buoy', 4: 'green-column-buoy', 5: 'red-buoy', 6: 'red-column-buoy', 7: 'red-dot', 8: 'yellow-buoy', 9: 'yellow-duck'}\n",
+      "obb: None\n",
+      "orig_img: array([[[173, 174, 170],\n",
+      "        [175, 176, 172],\n",
+      "        [173, 174, 170],\n",
+      "        ...,\n",
+      "        [126, 133, 130],\n",
+      "        [119, 126, 123],\n",
+      "        [110, 117, 114]],\n",
+      "\n",
+      "       [[171, 172, 168],\n",
+      "        [172, 173, 169],\n",
+      "        [172, 173, 169],\n",
+      "        ...,\n",
+      "        [121, 128, 125],\n",
+      "        [114, 121, 118],\n",
+      "        [107, 114, 111]],\n",
+      "\n",
+      "       [[168, 169, 165],\n",
+      "        [168, 169, 165],\n",
+      "        [169, 170, 166],\n",
+      "        ...,\n",
+      "        [116, 123, 120],\n",
+      "        [109, 116, 113],\n",
+      "        [104, 111, 108]],\n",
+      "\n",
+      "       ...,\n",
+      "\n",
+      "       [[161, 172, 122],\n",
+      "        [160, 171, 121],\n",
+      "        [160, 170, 124],\n",
+      "        ...,\n",
+      "        [153, 165, 117],\n",
+      "        [154, 164, 121],\n",
+      "        [154, 164, 121]],\n",
+      "\n",
+      "       [[164, 175, 125],\n",
+      "        [163, 174, 124],\n",
+      "        [161, 171, 125],\n",
+      "        ...,\n",
+      "        [152, 164, 116],\n",
+      "        [152, 162, 119],\n",
+      "        [153, 163, 120]],\n",
+      "\n",
+      "       [[162, 173, 123],\n",
+      "        [164, 175, 125],\n",
+      "        [163, 173, 127],\n",
+      "        ...,\n",
+      "        [150, 162, 114],\n",
+      "        [150, 160, 117],\n",
+      "        [152, 162, 119]]], dtype=uint8)\n",
+      "orig_shape: (3000, 4000)\n",
+      "path: 'arcturus_test_image_4.jpg'\n",
+      "probs: None\n",
+      "save_dir: 'runs/detect/predict'\n",
+      "speed: {'preprocess': 3.4845471382141113, 'inference': 26.294291019439697, 'postprocess': 0.5663633346557617}\n"
+     ]
+    }
+   ],
+   "source": [
+    "\n",
+    "from ultralytics import YOLO\n",
+    "model = YOLO(\"runs/detect/train_roboboat/weights/best.pt\")\n",
+    "\n",
+    "# results = model.predict(source=\"RoboBoat-Comp-2024-4/test/images\")\n",
+    "results = model([\"arcturus_test_image_1.jpg\",\n",
+    "                 \"arcturus_test_image_2.jpg\",\n",
+    "                 \"arcturus_test_image_3.jpg\",\n",
+    "                 \"arcturus_test_image_4.jpg\"])\n",
+    "for i, result in enumerate(results):\n",
+    "    print(f\"res = {result}\")\n",
+    "    boxes = result.boxes  # Boxes object for bounding box outputs\n",
+    "    masks = result.masks  # Masks object for segmentation masks outputs\n",
+    "    keypoints = result.keypoints  # Keypoints object for pose outputs\n",
+    "    probs = result.probs  # Probs object for classification outputs\n",
+    "    result.save(filename=f'result_{i}.jpg')  # specify filename to save to disk"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/all_seaing_perception/include/all_seaing_perception/obstacle_bbox_visualizer.hpp b/all_seaing_perception/include/all_seaing_perception/obstacle_bbox_visualizer.hpp
deleted file mode 100644
index d4d4a627..00000000
--- a/all_seaing_perception/include/all_seaing_perception/obstacle_bbox_visualizer.hpp
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef ALL_SEAING_PERCEPTION__OBSTACLE_BBOX_VISUALIZER_HPP
-#define ALL_SEAING_PERCEPTION__OBSTACLE_BBOX_VISUALIZER_HPP
-
-#include
-#include "rclcpp/rclcpp.hpp"
-
-#include "tf2_ros/buffer.h"
-#include "tf2_ros/transform_listener.h"
-#include "geometry_msgs/msg/transform_stamped.hpp"
-
-#include "image_geometry/pinhole_camera_model.h"
-
-#include "message_filters/subscriber.h"
-#include "message_filters/sync_policies/approximate_time.h"
-#include "message_filters/synchronizer.h"
-
-#include "sensor_msgs/msg/image.hpp"
-#include "sensor_msgs/msg/camera_info.hpp"
-#include "all_seaing_interfaces/msg/obstacle_map.hpp" -#include "all_seaing_interfaces/msg/labeled_bounding_box2_d_array.hpp" -#include -#include "yaml-cpp/yaml.h" - - -class ObstacleBboxVisualizer : public rclcpp::Node { -private: - // Publishers and subscribers - rclcpp::Publisher::SharedPtr m_image_pub; - rclcpp::Subscription::SharedPtr m_image_intrinsics_sub; - message_filters::Subscriber m_image_sub; - message_filters::Subscriber m_obstacle_map_sub; - message_filters::Subscriber m_bbox_sub; - - // Transform variables - std::shared_ptr m_tf_listener{nullptr}; - std::unique_ptr m_tf_buffer; - geometry_msgs::msg::TransformStamped m_pc_cam_tf; - bool m_pc_cam_tf_ok; - std::string color_label_mappings_file; - YAML::Node config_yaml; - std::map label_color_map; - - // Camera model - image_geometry::PinholeCameraModel m_cam_model; - - // Synchronizer - typedef message_filters::sync_policies::ApproximateTime< - sensor_msgs::msg::Image, - all_seaing_interfaces::msg::ObstacleMap, - all_seaing_interfaces::msg::LabeledBoundingBox2DArray> - ObstacleBboxVisualizerPolicy; - typedef message_filters::Synchronizer ObstacleBboxVisualizerSync; - std::shared_ptr m_obstacle_bbox_visualizer_sync; - - // Callback for synchronized image and obstacle map - void image_obstacle_cb( - const sensor_msgs::msg::Image::ConstSharedPtr& in_img_msg, - const all_seaing_interfaces::msg::ObstacleMap::ConstSharedPtr& in_map_msg, - const all_seaing_interfaces::msg::LabeledBoundingBox2DArray::ConstSharedPtr& in_bbox_msg); - - // Callback for camera info - void intrinsics_cb(const sensor_msgs::msg::CameraInfo::ConstSharedPtr& info_msg); - - // Helper function to get color for label - cv::Scalar get_color_for_label(const int& label); - - geometry_msgs::msg::TransformStamped get_tf(const std::string &in_target_frame, - const std::string &in_src_frame); - -public: - ObstacleBboxVisualizer(); - virtual ~ObstacleBboxVisualizer(); -}; - -#endif // ALL_SEAING_PERCEPTION__OBSTACLE_BBOX_VISUALIZER_HPP \ No newline at end of file diff --git a/all_seaing_perception/models/YoloV8s-Shape-Detector.pt b/all_seaing_perception/models/YoloV8s-Shape-Detector.pt new file mode 100644 index 00000000..6dc546d4 Binary files /dev/null and b/all_seaing_perception/models/YoloV8s-Shape-Detector.pt differ diff --git a/all_seaing_rviz_plugins/src/obstacle_map_display.cpp b/all_seaing_rviz_plugins/src/obstacle_map_display.cpp index 9e5342b0..8d626191 100644 --- a/all_seaing_rviz_plugins/src/obstacle_map_display.cpp +++ b/all_seaing_rviz_plugins/src/obstacle_map_display.cpp @@ -150,7 +150,7 @@ ObstacleMapDisplay::get_text(bool is_labeled, const all_seaing_interfaces::msg:: if (is_labeled) marker->text = std::to_string(obstacle.label); else - marker->text = std::to_string(obstacle.id); + marker->text = ""; marker->pose.position.x = obstacle.global_point.point.x; @@ -172,4 +172,4 @@ void ObstacleMapDisplay::reset() { } // namespace all_seaing_rviz_plugins #include "pluginlib/class_list_macros.hpp" -PLUGINLIB_EXPORT_CLASS(all_seaing_rviz_plugins::ObstacleMapDisplay, rviz_common::Display) \ No newline at end of file +PLUGINLIB_EXPORT_CLASS(all_seaing_rviz_plugins::ObstacleMapDisplay, rviz_common::Display)