diff --git a/all_seaing_autonomy/all_seaing_autonomy/roboboat/waypoint_finder.py b/all_seaing_autonomy/all_seaing_autonomy/roboboat/waypoint_finder.py
index 94c703fd..820388aa 100755
--- a/all_seaing_autonomy/all_seaing_autonomy/roboboat/waypoint_finder.py
+++ b/all_seaing_autonomy/all_seaing_autonomy/roboboat/waypoint_finder.py
@@ -12,7 +12,6 @@ import os
 
 from ament_index_python.packages import get_package_share_directory
 
-
 class WaypointFinder(Node):
     def __init__(self):
         super().__init__("waypoint_finder")
diff --git a/all_seaing_bringup/config/perception/contour_matching_color_ranges.yaml b/all_seaing_bringup/config/perception/contour_matching_color_ranges.yaml
new file mode 100644
index 00000000..f78a8fa5
--- /dev/null
+++ b/all_seaing_bringup/config/perception/contour_matching_color_ranges.yaml
@@ -0,0 +1,6 @@
+orange: [10, 30, 40, 255, 40, 255]
+red: [0, 15, 40, 255, 40, 255]
+red2: [150, 179, 40, 255, 40, 255]
+green: [35, 80, 40, 255, 40, 255]
+black: [0, 0, 0, 0, 0, 50]
+white: [0, 0, 0, 10, 200, 255]
\ No newline at end of file
diff --git a/all_seaing_bringup/config/perception/matching_weights.yaml b/all_seaing_bringup/config/perception/matching_weights.yaml
new file mode 100644
index 00000000..dd5b6228
--- /dev/null
+++ b/all_seaing_bringup/config/perception/matching_weights.yaml
@@ -0,0 +1,7 @@
+clustering_distance_weight: 1.0
+clustering_color_weights: [1.0, 0.0, 0.0]
+clustering_color_thres: 30
+cluster_contour_distance_weight: 1.0
+cluster_contour_color_weights: [1.0, 0.0, 0.0]
+contour_detection_color_weights: [1.0, 0.0, 0.0]
+cluster_contour_size_weight: 1.0
\ No newline at end of file
diff --git a/all_seaing_bringup/launch/sim.launch.py b/all_seaing_bringup/launch/sim.launch.py
index bdde25af..fe8e3ecf 100644
--- a/all_seaing_bringup/launch/sim.launch.py
+++ b/all_seaing_bringup/launch/sim.launch.py
@@ -33,7 +33,12 @@ def launch_setup(context, *args, **kwargs):
     color_ranges = os.path.join(
         bringup_prefix, "config", "perception", "color_ranges.yaml"
     )
-
+    matching_weights = os.path.join(
+        bringup_prefix, "config", "perception", "matching_weights.yaml"
+    )
+    contour_matching_color_ranges = os.path.join(
+        bringup_prefix, "config", "perception", "contour_matching_color_ranges.yaml"
+    )
     subprocess.run(["cp", "-r", os.path.join(bringup_prefix, "tile"), "/tmp"])
 
     launch_rviz = LaunchConfiguration("launch_rviz")
@@ -162,6 +167,42 @@ def launch_setup(context, *args, **kwargs):
         ],
     )
 
+    bbox_project_pcloud_node = launch_ros.actions.Node(
+        package="all_seaing_perception",
+        executable="bbox_project_pcloud",
+        output="screen",
+        remappings=[
+            ("camera_info_topic", "/wamv/sensors/cameras/front_left_camera_sensor/camera_info"),
+            ("camera_topic", "/wamv/sensors/cameras/front_left_camera_sensor/image_raw"),
+            ("lidar_topic", "point_cloud/filtered")
+        ],
+        parameters=[
+            {"bbox_object_margin": 0.0},
+            {"color_label_mappings_file": color_label_mappings},
+            {"color_ranges_file": color_ranges},
+            {"obstacle_size_min": 2},
+            {"obstacle_size_max": 60},
+            {"clustering_distance": 1.0},
+            {"matching_weights_file": matching_weights},
+            {"contour_matching_color_ranges_file": contour_matching_color_ranges}
+        ]
+    )
+
+    object_tracking_map_node = launch_ros.actions.Node(
+        package="all_seaing_perception",
+        executable="object_tracking_map",
+        output="screen",
+        arguments=['--ros-args', '--log-level', 'debug'],
+        remappings=[
+            ("camera_info_topic", "/wamv/sensors/cameras/front_left_camera_sensor/camera_info"),
+        ],
+        parameters=[
+            {"global_frame_id": "odom"},
+            {"obstacle_seg_thresh": 10.0},
+            {"obstacle_drop_thresh": 1.0},
+        ]
+    )
+
     rviz_node = launch_ros.actions.Node(
         package="rviz2",
         executable="rviz2",
@@ -274,6 +315,8 @@ def launch_setup(context, *args, **kwargs):
         controller_server,
         obstacle_bbox_overlay_node,
         obstacle_bbox_visualizer_node,
+        bbox_project_pcloud_node,
+        object_tracking_map_node,
         obstacle_detector_node,
         color_segmentation_node,
         point_cloud_filter_node,
diff --git a/all_seaing_interfaces/CMakeLists.txt b/all_seaing_interfaces/CMakeLists.txt
index adae1263..5139d5bc 100644
--- a/all_seaing_interfaces/CMakeLists.txt
+++ b/all_seaing_interfaces/CMakeLists.txt
@@ -24,8 +24,11 @@ rosidl_generate_interfaces(${PROJECT_NAME}
   "msg/WaypointArray.msg"
   "msg/ObstacleCounter.msg"
   "msg/ObstacleCounterArray.msg"
+  "msg/LabeledObjectPointCloud.msg"
+  "msg/LabeledObjectPointCloudArray.msg"
   DEPENDENCIES
   geometry_msgs
+  sensor_msgs
 )
 
 ament_auto_package()
diff --git a/all_seaing_interfaces/msg/LabeledObjectPointCloud.msg b/all_seaing_interfaces/msg/LabeledObjectPointCloud.msg
new file mode 100644
index 00000000..71bb3f28
--- /dev/null
+++ b/all_seaing_interfaces/msg/LabeledObjectPointCloud.msg
@@ -0,0 +1,4 @@
+builtin_interfaces/Time time
+int8 label
+sensor_msgs/PointCloud2 cloud
+sensor_msgs/Image segment
\ No newline at end of file
diff --git a/all_seaing_interfaces/msg/LabeledObjectPointCloudArray.msg b/all_seaing_interfaces/msg/LabeledObjectPointCloudArray.msg
new file mode 100644
index 00000000..9d847f39
--- /dev/null
+++ b/all_seaing_interfaces/msg/LabeledObjectPointCloudArray.msg
@@ -0,0 +1 @@
+all_seaing_interfaces/LabeledObjectPointCloud[] objects
\ No newline at end of file
diff --git a/all_seaing_interfaces/msg/Obstacle.msg b/all_seaing_interfaces/msg/Obstacle.msg
index e8ab823d..b1271b33 100644
--- a/all_seaing_interfaces/msg/Obstacle.msg
+++ b/all_seaing_interfaces/msg/Obstacle.msg
@@ -12,4 +12,4 @@ geometry_msgs/Point bbox_min
 geometry_msgs/Point bbox_max
 
 geometry_msgs/Point global_bbox_min
-geometry_msgs/Point global_bbox_max
+geometry_msgs/Point global_bbox_max
\ No newline at end of file
diff --git a/all_seaing_perception/CMakeLists.txt b/all_seaing_perception/CMakeLists.txt
index 5b5af644..c66e8ed1 100644
--- a/all_seaing_perception/CMakeLists.txt
+++ b/all_seaing_perception/CMakeLists.txt
@@ -12,6 +12,8 @@ ament_auto_find_build_dependencies()
 
 find_package(yaml-cpp REQUIRED)
 
+find_package(PCL 1.12 REQUIRED COMPONENTS common visualization io)
+
 ament_auto_add_executable(
   obstacle_detector
   ${PROJECT_NAME}/obstacle_detector.cpp
@@ -33,15 +35,28 @@ ament_auto_add_executable(
   obstacle_bbox_visualizer
   ${PROJECT_NAME}/obstacle_bbox_visualizer.cpp
 )
+ament_auto_add_executable(
+  bbox_project_pcloud
+  ${PROJECT_NAME}/bbox_project_pcloud.cpp
+)
+ament_auto_add_executable(
+  object_tracking_map
+  ${PROJECT_NAME}/object_tracking_map.cpp
+  ${PROJECT_NAME}/obstacle.cpp
+)
 
 target_link_libraries(obstacle_bbox_visualizer yaml-cpp)
+target_link_libraries(bbox_project_pcloud yaml-cpp ${PCL_LIBRARIES} ${PCL_COMMON_LIBRARIES} ${PCL_IO_LIBRARIES} ${PCL_VISUALIZATION_LIBRARIES})
+target_link_libraries(object_tracking_map yaml-cpp ${PCL_LIBRARIES} ${PCL_COMMON_LIBRARIES} ${PCL_IO_LIBRARIES} ${PCL_VISUALIZATION_LIBRARIES})
 
 install(TARGETS
   obstacle_detector
   obstacle_bbox_overlay
   point_cloud_filter
   point_cloud_image_overlay
-  obstacle_bbox_visualizer
+  obstacle_bbox_visualizer
+  bbox_project_pcloud
+  object_tracking_map
   DESTINATION lib/${PROJECT_NAME}
 )
diff --git a/all_seaing_perception/all_seaing_perception/bbox_project_pcloud.cpp b/all_seaing_perception/all_seaing_perception/bbox_project_pcloud.cpp
new file mode 100644
index 00000000..995b223a
--- /dev/null
+++ b/all_seaing_perception/all_seaing_perception/bbox_project_pcloud.cpp
@@ -0,0 +1,568 @@
+#include "all_seaing_perception/bbox_project_pcloud.hpp"
+
+BBoxProjectPCloud::BBoxProjectPCloud() : Node("bbox_project_pcloud"){
+    // Initialize tf_listener pointer
+    m_tf_buffer = std::make_unique<tf2_ros::Buffer>(this->get_clock());
+    m_tf_listener = std::make_shared<tf2_ros::TransformListener>(*m_tf_buffer);
+
+    //essential ones
+    this->declare_parameter("bbox_object_margin", 0.0);
+
+    // for cluster extraction
+    this->declare_parameter("obstacle_size_min", 20);
+    this->declare_parameter("obstacle_size_max", 100000);
+    this->declare_parameter("clustering_distance", 0.75);
+
+    m_obstacle_size_min = this->get_parameter("obstacle_size_min").as_int();
+    m_obstacle_size_max = this->get_parameter("obstacle_size_max").as_int();
+    m_clustering_distance = this->get_parameter("clustering_distance").as_double();
+
+    // for color segmentation
+    this->declare_parameter("color_label_mappings_file", "");
+
+    color_label_mappings_file = this->get_parameter("color_label_mappings_file").as_string();
+
+    // for cluster-contour matching & selection
+    this->declare_parameter("matching_weights_file", "");
+    this->declare_parameter("contour_matching_color_ranges_file", "");
+
+    matching_weights_file = this->get_parameter("matching_weights_file").as_string();
+    contour_matching_color_ranges_file = this->get_parameter("contour_matching_color_ranges_file").as_string();
+
+    // Subscriptions
+    m_image_intrinsics_sub = this->create_subscription<sensor_msgs::msg::CameraInfo>(
+        "camera_info_topic", 10, std::bind(&BBoxProjectPCloud::intrinsics_cb, this, std::placeholders::_1));
+    m_image_sub.subscribe(this, "camera_topic", rmw_qos_profile_sensor_data);
+    m_cloud_sub.subscribe(this, "lidar_topic", rmw_qos_profile_sensor_data);
+    m_bbox_sub.subscribe(this, "bounding_boxes", rmw_qos_profile_sensor_data);
+
+    // Send pc msg and img msg to bb_pcl_project
+    m_pc_cam_bbox_sync =
+        std::make_shared<PointCloudCamBBoxSync>(PointCloudCamBBoxPolicy(10), m_image_sub, m_cloud_sub, m_bbox_sub);
+    m_pc_cam_bbox_sync->registerCallback(std::bind(&BBoxProjectPCloud::bb_pcl_project, this,
+        std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
+
+    // Publishers
+    m_object_pcl_pub = this->create_publisher<all_seaing_interfaces::msg::LabeledObjectPointCloudArray>("labeled_object_point_clouds", 5);
+    m_object_pcl_viz_pub = this->create_publisher<sensor_msgs::msg::PointCloud2>("object_point_clouds_viz", 5);
+    m_refined_object_pcl_segment_pub = this->create_publisher<all_seaing_interfaces::msg::LabeledObjectPointCloudArray>("refined_object_point_clouds_segments", 5);
+    m_refined_object_pcl_viz_pub = this->create_publisher<sensor_msgs::msg::PointCloud2>("refined_object_point_clouds_viz", 5);
+    m_refined_object_segment_viz_pub = this->create_publisher<sensor_msgs::msg::Image>("refined_object_segments_viz", 5);
+
+    // get color label mappings from yaml
+    RCLCPP_DEBUG(this->get_logger(), "READING COLOR LABEL MAPPINGS");
+    std::ifstream label_yaml(color_label_mappings_file);
+    if (label_yaml.is_open()) {
+        label_config_yaml = YAML::Load(label_yaml);
+        for (YAML::const_iterator it = label_config_yaml.begin(); it != label_config_yaml.end(); ++it) {
+            label_color_map[it->second.as<int>()] = it->first.as<std::string>();
+            RCLCPP_DEBUG(this->get_logger(), "%d -> %s", it->second.as<int>(), it->first.as<std::string>().c_str());
+        }
+    }
+    else {
+        RCLCPP_ERROR(this->get_logger(), "Failed to open YAML file: %s", color_label_mappings_file.c_str());
+    }
+    label_yaml.close();
+
+    // get cluster-contour matching & selection parameters from yaml
+    RCLCPP_DEBUG(this->get_logger(), "READING MATCHING PARAMETERS");
+    std::ifstream matching_yaml(matching_weights_file);
+    if (matching_yaml.is_open()) {
+        matching_weights_config_yaml = YAML::Load(matching_yaml);
+
+        m_clustering_distance_weight = matching_weights_config_yaml["clustering_distance_weight"].as<double>();
+        m_clustering_color_weights = matching_weights_config_yaml["clustering_color_weights"].as<std::vector<double>>();
+        m_clustering_color_thres = matching_weights_config_yaml["clustering_color_thres"].as<double>();
+        m_cluster_contour_distance_weight = matching_weights_config_yaml["cluster_contour_distance_weight"].as<double>();
+        m_cluster_contour_color_weights = matching_weights_config_yaml["cluster_contour_color_weights"].as<std::vector<double>>();
+        m_contour_detection_color_weights = matching_weights_config_yaml["contour_detection_color_weights"].as<std::vector<double>>();
+        m_cluster_contour_size_weight = matching_weights_config_yaml["cluster_contour_size_weight"].as<double>();
+
+        RCLCPP_DEBUG(this->get_logger(), "clustering_distance_weight: %lf", m_clustering_distance_weight);
+        RCLCPP_DEBUG(this->get_logger(), "clustering_color_weights: [%lf, %lf, %lf]", m_clustering_color_weights[0], m_clustering_color_weights[1], m_clustering_color_weights[2]);
+        RCLCPP_DEBUG(this->get_logger(), "clustering_color_thres: %lf", m_clustering_color_thres);
+        RCLCPP_DEBUG(this->get_logger(), "cluster_contour_distance_weight: %lf", m_cluster_contour_distance_weight);
+        RCLCPP_DEBUG(this->get_logger(), "cluster_contour_color_weights: [%lf, %lf, %lf]", m_cluster_contour_color_weights[0], m_cluster_contour_color_weights[1], m_cluster_contour_color_weights[2]);
+        RCLCPP_DEBUG(this->get_logger(), "contour_detection_color_weights: [%lf, %lf, %lf]", m_contour_detection_color_weights[0], m_contour_detection_color_weights[1], m_contour_detection_color_weights[2]);
+    }
+    else {
+        RCLCPP_ERROR(this->get_logger(), "Failed to open YAML file: %s", matching_weights_file.c_str());
+    }
+    matching_yaml.close();
+
+    // get contour matching color ranges from yaml
+    RCLCPP_DEBUG(this->get_logger(), "READING CONTOUR MATCHING COLOR RANGES");
+    std::ifstream contour_ranges_yaml(contour_matching_color_ranges_file);
+    if (contour_ranges_yaml.is_open()) {
+        contour_matching_ranges_config_yaml = YAML::Load(contour_ranges_yaml);
+        for (YAML::const_iterator it = contour_matching_ranges_config_yaml.begin(); it != contour_matching_ranges_config_yaml.end(); ++it) {
+            contour_matching_color_range_map[it->first.as<std::string>()] = it->second.as<std::vector<int>>();
+            RCLCPP_DEBUG(this->get_logger(), "%s -> [%d, %d, %d, %d, %d, %d]", it->first.as<std::string>().c_str(), it->second.as<std::vector<int>>()[0], it->second.as<std::vector<int>>()[1], it->second.as<std::vector<int>>()[2], it->second.as<std::vector<int>>()[3], it->second.as<std::vector<int>>()[4], it->second.as<std::vector<int>>()[5]);
+        }
+    }
+    else {
+        RCLCPP_ERROR(this->get_logger(), "Failed to open YAML file: %s", contour_matching_color_ranges_file.c_str());
+    }
+    contour_ranges_yaml.close();
+}
+
+void BBoxProjectPCloud::intrinsics_cb(const sensor_msgs::msg::CameraInfo &info_msg) {
+    RCLCPP_DEBUG(this->get_logger(), "GOT CAMERA INFO");
+    m_cam_model.fromCameraInfo(info_msg);
+}
+
+// The HSV-similarity condition for Conditional Euclidean Clustering
+bool hsv_diff_condition(std::vector<double> weights, double thres, const pcl::PointXYZHSV& p1, const pcl::PointXYZHSV& p2, float sq_dist){
+    return weights[0]*(p1.h-p2.h)*(p1.h-p2.h)+weights[1]*(p1.s-p2.s)*(p1.s-p2.s)+weights[2]*(p1.v-p2.v)*(p1.v-p2.v) < thres*thres;
+}
+
+double line_range_penalty(int a, int b, int x){
+    // Average of squared distances from each point in the range:
+    // ((b-x)^3 + (x-a)^3) / (3*(b-a)) is the mean of (t-x)^2 over t in [a,b],
+    // which is zero-extended to 0 inside the range itself
+    return (a<=x && x<=b)?0:(pow(b-x,3)+pow(x-a,3))/(3*(b-a));
+}
+
+// The penalty function that compares an HSV color value to an HSV color range
+double color_range_penalty(std::vector<double> weights, std::vector<int> color_range, std::vector<long long> pt_color){
+    double h_min=color_range[0], h_max=color_range[1], s_min=color_range[2], s_max=color_range[3], v_min=color_range[4], v_max=color_range[5];
+    return weights[0]*line_range_penalty(h_min, h_max, pt_color[0]) + weights[1]*line_range_penalty(s_min, s_max, pt_color[1]) + weights[2]*line_range_penalty(v_min, v_max, pt_color[2]);
+}
+
+cv::Vec3b int_to_bgr(double i, double k){
+    cv::Mat hsvimg(1,1,CV_8UC3,cv::Vec3b(i/k*180,255,255));
+    cv::Mat bgrimg;
+    cv::cvtColor(hsvimg, bgrimg, cv::COLOR_HSV2BGR);
+    return bgrimg.at<cv::Vec3b>(0,0);
+}
+
+void BBoxProjectPCloud::bb_pcl_project(
+    const sensor_msgs::msg::Image::ConstSharedPtr &in_img_msg,
+    const sensor_msgs::msg::PointCloud2::ConstSharedPtr &in_cloud_msg,
+    const all_seaing_interfaces::msg::LabeledBoundingBox2DArray::ConstSharedPtr &in_bbox_msg) {
+    RCLCPP_DEBUG(this->get_logger(), "GOT DATA");
+    // LIDAR -> Camera transform (useful for projecting the camera bboxes onto the point cloud, have the origin on the camera frame)
+    if (!m_pc_cam_tf_ok)
+        m_pc_cam_tf = get_tf(in_img_msg->header.frame_id, in_cloud_msg->header.frame_id);
+
+    // Transform in_cloud_msg to the camera frame and convert PointCloud2 to PCL PointCloud
+    sensor_msgs::msg::PointCloud2 in_cloud_tf;
+    tf2::doTransform(*in_cloud_msg, in_cloud_tf, m_pc_cam_tf);
+    pcl::PointCloud<pcl::PointXYZI>::Ptr in_cloud_tf_ptr(new pcl::PointCloud<pcl::PointXYZI>);
+    pcl::fromROSMsg(in_cloud_tf, *in_cloud_tf_ptr);
+
+    RCLCPP_DEBUG(this->get_logger(), "%d POINTS, %d OBJECTS", (int)in_cloud_tf_ptr->points.size(), (int)in_bbox_msg->boxes.size());
+    RCLCPP_DEBUG(this->get_logger(), "FRAME_ID BEFORE SELECTION, AFTER TRANSFORM TO CAMERA FRAME: %s", in_cloud_tf_ptr->header.frame_id.c_str());
+
+    auto object_pcls = all_seaing_interfaces::msg::LabeledObjectPointCloudArray();
+    std::vector<pcl::PointCloud<pcl::PointXYZHSV>> obj_cloud_vec;
+    // int max_len = 0;
+    // Just use the same pcloud to image projection, but check if it's within some bounding box and assign it to that detection
+    int obj = 0;
+    for (pcl::PointXYZI &point_tf : in_cloud_tf_ptr->points) {
+        // Project 3D point onto the image plane using the intrinsic matrix.
+        // Gazebo has a different coordinate system, so the y, z, and x coordinates are modified.
+        RCLCPP_DEBUG(this->get_logger(), "3D POINT: (%lf, %lf, %lf)", point_tf.x, point_tf.y, point_tf.z);
+        cv::Point2d xy_rect = m_cam_model.project3dToPixel(cv::Point3d(point_tf.y, point_tf.z, -point_tf.x));
+        // Check if within bounds & in front of the boat
+        RCLCPP_DEBUG(this->get_logger(), "POINT PROJECTED ONTO IMAGE: (%lf, %lf)", xy_rect.x, xy_rect.y);
+    }
+    // Convert msg to CvImage to work with CV2. Copy img since we will be modifying.
+    cv_bridge::CvImagePtr cv_ptr;
+    try {
+        cv_ptr = cv_bridge::toCvCopy(in_img_msg, sensor_msgs::image_encodings::BGR8);
+    } catch (cv_bridge::Exception &e) {
+        RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
+        return;
+    }
+    //show received image
+    // cv::imshow("Received Image", cv_ptr->image);
+    // cv::waitKey();
+    cv::Mat cv_hsv;
+    cv::cvtColor(cv_ptr->image, cv_hsv, cv::COLOR_BGR2HSV);
+    //show hsv (false color) image
+    // cv::imshow("HSV Image", cv_hsv);
+    // cv::waitKey();
+    std::vector<std::pair<all_seaing_interfaces::msg::LabeledBoundingBox2D, pcl::PointCloud<pcl::PointXYZHSV>::Ptr>> bbox_pcloud_objects;
+    for (all_seaing_interfaces::msg::LabeledBoundingBox2D bbox : in_bbox_msg->boxes){
+        auto labeled_pcl = all_seaing_interfaces::msg::LabeledObjectPointCloud();
+        pcl::PointCloud<pcl::PointXYZHSV>::Ptr obj_cloud_ptr(new pcl::PointCloud<pcl::PointXYZHSV>);
+        labeled_pcl.time = in_cloud_msg->header.stamp;
+        labeled_pcl.label = bbox.label;
+        obj_cloud_ptr->header = in_cloud_tf_ptr->header;
+        RCLCPP_DEBUG(this->get_logger(), "BOUNDING BOX FOR OBJECT %d: (%d,%d), (%d, %d)", obj, bbox.min_x, bbox.min_y, bbox.max_x, bbox.max_y);
+        for (pcl::PointXYZI &point_tf : in_cloud_tf_ptr->points) {
+            // Project 3D point onto the image plane using the intrinsic matrix.
+            // Gazebo has a different coordinate system, so the y, z, and x coordinates are modified.
+            cv::Point2d xy_rect = m_cam_model.project3dToPixel(cv::Point3d(point_tf.y, point_tf.z, -point_tf.x));
+            // Check if within bounds & in front of the boat
+            if ((xy_rect.x >= 0) && (xy_rect.x < m_cam_model.cameraInfo().width) && (xy_rect.y >= 0) &&
+                (xy_rect.y < m_cam_model.cameraInfo().height) && (point_tf.x >= 0)) {
+                double bbox_margin = this->get_parameter("bbox_object_margin").as_double();
+                // Check if point is in bbox
+                if(xy_rect.x >= bbox.min_x-bbox_margin && xy_rect.x <= bbox.max_x+bbox_margin && xy_rect.y >= bbox.min_y-bbox_margin && xy_rect.y <= bbox.max_y+bbox_margin){
+                    cv::Vec3b hsv_vec3b = cv_hsv.at<cv::Vec3b>(xy_rect);
+                    std::vector<long long> hsv = {(long long)hsv_vec3b[0], (long long)hsv_vec3b[1], (long long)hsv_vec3b[2]};
+                    // cv::Vec3b bgr = cv_ptr->image.at<cv::Vec3b>(xy_rect);
+                    obj_cloud_ptr->push_back(pcl::PointXYZHSV(point_tf.x, point_tf.y, point_tf.z, ((float)hsv[0])*2, ((float)hsv[1])/((float)255.0), ((float)hsv[2])/((float)255.0)));
+                    // obj_cloud_ptr->push_back(pcl::PointXYZRGB(point_tf.x, point_tf.y, point_tf.z, bgr[2], bgr[1], bgr[0]));
+                    RCLCPP_DEBUG(this->get_logger(), "SELECTED HSV POINT PROJECTED ONTO IMAGE: (%lf, %lf) -> (%lld, %lld, %lld)", xy_rect.x, xy_rect.y, hsv[0], hsv[1], hsv[2]);
+                    // RCLCPP_DEBUG(this->get_logger(), "SELECTED RGB POINT PROJECTED ONTO IMAGE: (%lf, %lf) -> (%d, %d, %d)", xy_rect.x, xy_rect.y, bgr[2], bgr[1], bgr[0]);
+                }
+            }
+        }
+        pcl::toROSMsg(*obj_cloud_ptr, labeled_pcl.cloud);
+        labeled_pcl.cloud.header.stamp = in_cloud_msg->header.stamp;
+        object_pcls.objects.push_back(labeled_pcl);
+        obj_cloud_vec.push_back(*obj_cloud_ptr);
+        bbox_pcloud_objects.push_back(std::make_pair(bbox, obj_cloud_ptr));
+        obj++;
+        RCLCPP_DEBUG(this->get_logger(), "%d POINTS IN OBJECT %d", (int)obj_cloud_ptr->size(), obj);
+        // max_len = std::max(max_len, (int)obj_cloud_ptr->size());
+    }
+    RCLCPP_DEBUG(this->get_logger(), "WILL NOW SEND OBJECT POINT CLOUDS");
+    m_object_pcl_pub->publish(object_pcls);
+    pcl::PointCloud<pcl::PointXYZHSV>::Ptr all_obj_pcls_ptr(new pcl::PointCloud<pcl::PointXYZHSV>);
+    // pcl::PointCloud<pcl::PointXYZRGB>::Ptr all_obj_pcls_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+    all_obj_pcls_ptr->header = in_cloud_tf_ptr->header;
+    //convert vector of PointCloud to a single PointCloud with channels
+    // all_obj_pcls_ptr->resize((pcl::uindex_t)max_len, (pcl::uindex_t)in_bbox_msg->boxes.size());
+    // RCLCPP_DEBUG(this->get_logger(), "PUBLISHED PCLOUD DIMENSIONS: height: %d, width: %d", (int)all_obj_pcls_ptr->height, (int)all_obj_pcls_ptr->width);
+    // RCLCPP_DEBUG(this->get_logger(), "STORED PCLOUD DIMENSIONS: objects: %d, max_length: %d", (int)obj_cloud_vec.size(), max_len);
+    try{
+        for(int i = 0; i < (int)obj_cloud_vec.size(); i++){
+            for(int j = 0; j < (int)obj_cloud_vec[i].size(); j++){
+                // all_obj_pcls_ptr->at(j,i) = obj_cloud_vec[i][j];
+                all_obj_pcls_ptr->push_back(obj_cloud_vec[i][j]);
+            }
+        }
+    }catch(std::exception &ex){
+        RCLCPP_ERROR(this->get_logger(), "PROJECT BBOX PCLOUD PUBLISHING ERROR: %s", ex.what());
+    }
+    auto obj_pcls_msg = sensor_msgs::msg::PointCloud2();
+    pcl::toROSMsg(*all_obj_pcls_ptr, obj_pcls_msg);
+    obj_pcls_msg.header.stamp = in_cloud_msg->header.stamp;
+    m_object_pcl_viz_pub->publish(obj_pcls_msg);
+
+    // REFINE OBJECT POINT CLOUDS
+    auto refined_objects_pub = all_seaing_interfaces::msg::LabeledObjectPointCloudArray();
+    std::vector<std::pair<pcl::PointCloud<pcl::PointXYZHSV>, std::vector<cv::Point>>> refined_cloud_contour_vec;
+    // int max_refined_len = 0;
+
+    RCLCPP_DEBUG(this->get_logger(), "# OF FOUND BBOXES: %d", (int)bbox_pcloud_objects.size());
+    for(auto bbox_pcloud_pair : bbox_pcloud_objects){
+        all_seaing_interfaces::msg::LabeledBoundingBox2D bbox = bbox_pcloud_pair.first;
+        pcl::PointCloud<pcl::PointXYZHSV>::Ptr pcloud_ptr = bbox_pcloud_pair.second;
+        // pcl::PointCloud<pcl::PointXYZRGB>::Ptr pcloud_ptr = bbox_pcloud_pair.second;
+        // pcl::PointCloud<pcl::PointXYZHSV>::Ptr pcloud_ptr_hsv(new pcl::PointCloud<pcl::PointXYZHSV>);
+        // // CONVERT RGB POINTS TO HSV
+        // for(auto pt : pcloud_ptr->points){
+        //     pcl::PointXYZHSV pt_hsv;
+        //     pcl::PointXYZRGBtoXYZHSV(*pt, pt_hsv);
+        //     pcloud_ptr_hsv->push_back(pt_hsv);
+        // }
+
+        //image & bbox relation & adjustment (make sure in-bounds)
+        RCLCPP_DEBUG(this->get_logger(), "BBOX COORDS: (%d, %d), (%d, %d) in img of sz %d x %d", bbox.min_x, bbox.min_y, bbox.max_x+1, bbox.max_y+1, cv_hsv.cols, cv_hsv.rows);
+        if(bbox.min_x > bbox.max_x || bbox.min_y > bbox.max_y) continue;
+        //make sure bbox coords are inside image
+        int adj_min_x = std::max((int)bbox.min_x,0);
+        int adj_max_x = std::min((int)bbox.max_x+1, cv_hsv.cols);
+        int adj_min_y = std::max((int)bbox.min_y,0);
+        int adj_max_y = std::min((int)bbox.max_y+1, cv_hsv.rows);
+        RCLCPP_DEBUG(this->get_logger(), "ADJUSTED BBOX COORDS: (%d, %d), (%d, %d) in img of sz %d x %d", adj_min_x, adj_min_y, adj_max_x, adj_max_y, cv_hsv.cols, cv_hsv.rows);
+        cv::Mat cv_hsv_copy = cv_hsv.clone();
+        cv::Mat hsv_img(cv_hsv, cv::Range(adj_min_y, adj_max_y), cv::Range(adj_min_x, adj_max_x));
+        cv::Size img_sz;
+        cv::Point bbox_offset;
+        hsv_img.locateROI(img_sz, bbox_offset);
+        RCLCPP_DEBUG(this->get_logger(), "IMAGE SIZE: (%d, %d)", img_sz.height, img_sz.width);
+        RCLCPP_DEBUG(this->get_logger(), "BBOX OFFSET: (%d, %d)", bbox_offset.x, bbox_offset.y);
+
+        //to display points inside contours, and the clusters projected (debugging)
+        cv::Mat cv_img_copy = cv_ptr->image.clone();
+        cv::Mat mat_clusters(cv_img_copy, cv::Range(adj_min_y, adj_max_y), cv::Range(adj_min_x, adj_max_x));
+        cv::Mat mat_contours = mat_clusters.clone();
+        cv::Mat mat_opt_cluster = mat_clusters.clone();
+        cv::Mat mat_opt_contour = mat_contours.clone();
+
+        //extract clusters
+        pcl::search::KdTree<pcl::PointXYZHSV>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZHSV>());
+        // pcl::search::KdTree<pcl::PointXYZRGB>::Ptr tree(new pcl::search::KdTree<pcl::PointXYZRGB>);
+        if (!pcloud_ptr->points.empty())
+            tree->setInputCloud(pcloud_ptr);
+        std::vector<pcl::PointIndices> clusters_indices;
+
+        // CONDITIONAL (WITH HSV-BASED CONDITION) EUCLIDEAN CLUSTERING
+        // TODO: USE AN ALGORITHM THAT MAKES SURE THE CONTOUR IS CONTINUOUS (WITHOUT NON-SELECTED POINTS INSIDE IT, AS IS PROBABLY THE CASE NOW)
+        //(though it seems to work pretty well as it is now)
+        pcl::ConditionalEuclideanClustering<pcl::PointXYZHSV> cec;
+        cec.setClusterTolerance(m_clustering_distance);
+        cec.setMinClusterSize(m_obstacle_size_min);
+        cec.setMaxClusterSize(m_obstacle_size_max);
+        cec.setSearchMethod(tree);
+        cec.setInputCloud(pcloud_ptr);
+        std::function<bool(const pcl::PointXYZHSV&, const pcl::PointXYZHSV&, float)> cond_func = std::bind(&hsv_diff_condition, m_clustering_color_weights, m_clustering_color_thres, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3);
+        cec.setConditionFunction(cond_func);
+        cec.segment(clusters_indices);
+        RCLCPP_DEBUG(this->get_logger(), "# OF EXTRACTED CLUSTERS: %d", (int)clusters_indices.size());
+        int clust_id = 0;
+        for(auto ind_set : clusters_indices){
+            RCLCPP_DEBUG(this->get_logger(), "SIZE OF CLUSTER %d: %d", clust_id, (int)ind_set.indices.size());
+            for(pcl::index_t ind : ind_set.indices){
+                cv::Point2d cloud_pt_xy = m_cam_model.project3dToPixel(cv::Point3d(pcloud_ptr->points[ind].y, pcloud_ptr->points[ind].z, -pcloud_ptr->points[ind].x));
+                mat_clusters.at<cv::Vec3b>((cv::Point)cloud_pt_xy-bbox_offset) = int_to_bgr(clust_id, clusters_indices.size());
+            }
+            clust_id++;
+        }
+        //show received bbox
+        // cv::Mat bgr_bbox;
+        // cv::cvtColor(hsv_img, bgr_bbox, cv::COLOR_HSV2BGR);
+        // cv::imshow("BBox", bgr_bbox);
+        // cv::waitKey();
+
+        // color segmentation using the color label
+        std::vector<int> lims = label_color_map[bbox.label]=="red"?contour_matching_color_range_map["red2"]:contour_matching_color_range_map[label_color_map[bbox.label]];
+        int h_min=lims[0], h_max=lims[1], s_min=lims[2], s_max=lims[3], v_min=lims[4], v_max=lims[5];
+        RCLCPP_DEBUG(this->get_logger(), "COLOR: %s, & SEGMENTATION COLOR LIMITS-> H: %d - %d, S: %d - %d, V: %d - %d", label_color_map[bbox.label].c_str(), h_min, h_max, s_min, s_max, v_min, v_max);
+        cv::Scalar bbox_mean_hsv = cv::mean(hsv_img);
+        RCLCPP_DEBUG(this->get_logger(), "MEAN BBOX IMAGE COLOR: (%lf, %lf, %lf)", bbox_mean_hsv[0], bbox_mean_hsv[1], bbox_mean_hsv[2]);
+        cv::Mat mask;
+        cv::inRange(hsv_img, cv::Scalar(h_min, s_min, v_min), cv::Scalar(h_max, s_max, v_max), mask);
+        //show mask
+        // cv::imshow("Mask", mask);
+        // cv::waitKey();
+        cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5,5)));
+        cv::dilate(mask, mask, cv::getStructuringElement(cv::MORPH_RECT, cv::Size(7,7)));
+        std::vector<std::vector<cv::Point>> contours;
+        cv::findContours(mask, contours, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
+        std::vector<std::vector<cv::Point>> in_contours;
+        //Get image points inside contour, put them into a vector, will then process those (not the contours themselves, they are just a boundary)
+        for(std::vector<cv::Point> contour : contours){
+            //TAKE THE BOUNDING BOX (BOUNDINGRECT), TAKE ALL THE PIXELS IN IT (TO NOT CHECK EVERY PIXEL IN THE IMAGE) AND CHECK IF EACH IS INSIDE THE CONTOUR USING POINTPOLYGONTEST
+            cv::Rect cont_bbox = cv::boundingRect(contour);
+            std::vector<cv::Point> in_contour;
+            for(int x = cont_bbox.x; x < cont_bbox.x + cont_bbox.width; x++){
+                for(int y = cont_bbox.y; y < cont_bbox.y + cont_bbox.height; y++){
+                    cv::Point pt = cv::Point(x,y);
+                    if(cv::pointPolygonTest(contour, pt, false)>=0){
+                        in_contour.push_back(pt);
+                    }
+                }
+            }
+            in_contours.push_back(in_contour);
+        }
+        RCLCPP_DEBUG(this->get_logger(), "# OF FOUND CONTOURS: %d", (int)in_contours.size());
+        // Convert the contour points to fit the original image (using image_sz and bbox_offset -> just bbox_offset+pt_coords) to be able to use it with the LiDAR (projected) points
+        for(int ctr = 0; ctr < (int)in_contours.size(); ctr++){
+            RCLCPP_DEBUG(this->get_logger(), "SIZE OF CONTOUR %d: %d", ctr, (int)in_contours[ctr].size());
+            for(int pt=0; pt < (int)in_contours[ctr].size(); pt++){
+                in_contours[ctr][pt]+=bbox_offset;
+                mat_contours.at<cv::Vec3b>(in_contours[ctr][pt]-bbox_offset) = int_to_bgr(ctr, in_contours.size());
+            }
+        }
+        if(in_contours.empty() || clusters_indices.empty()) continue;
+        // Convert both the pcloud and the contours to a vector of pair<position, color> and compute sum and sum of squares (can be used to compute all the needed metrics)
+        std::vector<std::vector<std::pair<cv::Point2d, std::vector<long long>>>> contours_pts;
+        std::vector<std::pair<std::pair<cv::Point2d, std::vector<long long>>, std::pair<cv::Point2d, std::vector<long long>>>> contours_qts;
+        for (std::vector<cv::Point> in_contour : in_contours){// point.x, point.y the coords
+            std::vector<std::pair<cv::Point2d, std::vector<long long>>> contour_pts;
+            std::pair<std::pair<cv::Point2d, std::vector<long long>>, std::pair<cv::Point2d, std::vector<long long>>> contour_qts;
+            contour_qts.first.second = contour_qts.second.second = {0,0,0};
+            for(cv::Point image_pt : in_contour){
+                cv::Vec3b image_pt_hsv_vec3b = cv_hsv.at<cv::Vec3b>(image_pt);
+                std::vector<long long> image_pt_hsv = {(long long)image_pt_hsv_vec3b[0], (long long)image_pt_hsv_vec3b[1], (long long)image_pt_hsv_vec3b[2]};
+                RCLCPP_DEBUG(this->get_logger(), "IMAGE POINT COLOR AT POINT (%d, %d) (size: %d, %d): (%lld, %lld, %lld)", image_pt.x, image_pt.y, cv_hsv.cols, cv_hsv.rows, image_pt_hsv[0], image_pt_hsv[1], image_pt_hsv[2]);
+                cv::Point2d image_pt_xy = cv::Point2d(image_pt.x, image_pt.y);
+                contour_pts.push_back(std::make_pair(image_pt_xy, image_pt_hsv));
+                RCLCPP_DEBUG(this->get_logger(), "IMAGE H SUM/SQUARED BEFORE POINT UPDATE: %lld/%lld", contour_qts.first.second[0], contour_qts.second.second[0]);
+                //store sums
+                contour_qts.first.first.x+=image_pt_xy.x;
+                contour_qts.first.first.y+=image_pt_xy.y;
+                contour_qts.first.second[0]+=image_pt_hsv[0];
+                contour_qts.first.second[1]+=image_pt_hsv[1];
+                contour_qts.first.second[2]+=image_pt_hsv[2];
+                //store sum of squares
+                contour_qts.second.first.x+=image_pt_xy.x*image_pt_xy.x;
+                contour_qts.second.first.y+=image_pt_xy.y*image_pt_xy.y;
+                contour_qts.second.second[0]+=image_pt_hsv[0]*image_pt_hsv[0];
+                contour_qts.second.second[1]+=image_pt_hsv[1]*image_pt_hsv[1];
+                contour_qts.second.second[2]+=image_pt_hsv[2]*image_pt_hsv[2];
+                RCLCPP_DEBUG(this->get_logger(), "IMAGE H SUM/SQUARED AFTER POINT UPDATE: %lld/%lld", contour_qts.first.second[0], contour_qts.second.second[0]);
+            }
+            contours_pts.push_back(contour_pts);
+            contours_qts.push_back(contour_qts);
+        }
+        std::vector<std::vector<std::pair<cv::Point2d, std::vector<long long>>>> clusters_pts;
+        std::vector<std::pair<std::pair<cv::Point2d, std::vector<long long>>, std::pair<cv::Point2d, std::vector<long long>>>> clusters_qts;
+        for (pcl::PointIndices cluster : clusters_indices){// vector<int> is cluster.indices
+            std::vector<std::pair<cv::Point2d, std::vector<long long>>> cluster_pts;
+            std::pair<std::pair<cv::Point2d, std::vector<long long>>, std::pair<cv::Point2d, std::vector<long long>>> cluster_qts;
+            cluster_qts.first.second = cluster_qts.second.second = {0,0,0};
+            for(pcl::index_t ind : cluster.indices){
+                RCLCPP_DEBUG(this->get_logger(), "INITIAL CLUSTER POINT COLOR: (%lf, %lf, %lf)", pcloud_ptr->points[ind].h, pcloud_ptr->points[ind].s, pcloud_ptr->points[ind].v);
+                //transform to OpenCV HSV
+                std::vector<long long> cloud_pt_hsv = {(long long)(pcloud_ptr->points[ind].h/2), (long long)(pcloud_ptr->points[ind].s*((float)255.0)), (long long)(pcloud_ptr->points[ind].v*((float)255.0))};
+                RCLCPP_DEBUG(this->get_logger(), "CONVERTED CLUSTER POINT COLOR: (%lld, %lld, %lld)", cloud_pt_hsv[0], cloud_pt_hsv[1], cloud_pt_hsv[2]);
+                cv::Point2d cloud_pt_xy = m_cam_model.project3dToPixel(cv::Point3d(pcloud_ptr->points[ind].y, pcloud_ptr->points[ind].z, -pcloud_ptr->points[ind].x));
+                cluster_pts.push_back(std::make_pair(cloud_pt_xy, cloud_pt_hsv));
+                //store sums
+                cluster_qts.first.first.x+=cloud_pt_xy.x;
+                cluster_qts.first.first.y+=cloud_pt_xy.y;
+                cluster_qts.first.second[0]+=cloud_pt_hsv[0];
+                cluster_qts.first.second[1]+=cloud_pt_hsv[1];
+                cluster_qts.first.second[2]+=cloud_pt_hsv[2];
+                //store sum of squares
+                cluster_qts.second.first.x+=cloud_pt_xy.x*cloud_pt_xy.x;
+                cluster_qts.second.first.y+=cloud_pt_xy.y*cloud_pt_xy.y;
+                cluster_qts.second.second[0]+=cloud_pt_hsv[0]*cloud_pt_hsv[0];
+                cluster_qts.second.second[1]+=cloud_pt_hsv[1]*cloud_pt_hsv[1];
+                cluster_qts.second.second[2]+=cloud_pt_hsv[2]*cloud_pt_hsv[2];
+            }
+            clusters_pts.push_back(cluster_pts);
+            clusters_qts.push_back(cluster_qts);
+        }
+
+        // GO THROUGH ALL THE CLUSTER/CONTOUR PAIRS AND FIND THE BEST ONE BASED ON THE OPTIMALITY METRIC WITH THE WEIGHTS
+        long long min_pair_cost = -1;
+        int opt_contour_id = -1, opt_cluster_id = -1;
+        for (int contour = 0; contour < (int)contours_pts.size(); contour++){
+            long long contour_size = contours_pts[contour].size();
+            std::pair<std::pair<cv::Point2d, std::vector<long long>>, std::pair<cv::Point2d, std::vector<long long>>> contour_qts = contours_qts[contour];
+
+            long long contour_cost = 0;
+            for (std::pair<cv::Point2d, std::vector<long long>> contour_pt : contours_pts[contour]){
+                contour_cost += (long long)color_range_penalty(m_contour_detection_color_weights, label_color_map[bbox.label]=="red"?contour_matching_color_range_map["red2"]:contour_matching_color_range_map[label_color_map[bbox.label]], contour_pt.second)/contour_size;
+            }
+            for (int cluster = 0; cluster < (int)clusters_pts.size(); cluster++){
+                long long cluster_size = clusters_pts[cluster].size();
+                std::pair<std::pair<cv::Point2d, std::vector<long long>>, std::pair<cv::Point2d, std::vector<long long>>> cluster_qts = clusters_qts[cluster];
+
+                //sum(cluster_size*x_i^2+contour_size*x_j^2) - 2*sum(x_i*x_j) + sum(cluster_size*y_i^2+contour_size*y_j^2) - 2*sum(y_i*y_j)
+                long long fast_contour_cluster_sq_dist_sum = cluster_size*contour_qts.second.first.x+contour_size*cluster_qts.second.first.x\
+                    - 2*contour_qts.first.first.x*cluster_qts.first.first.x\
+                    + cluster_size*contour_qts.second.first.y+contour_size*cluster_qts.second.first.y\
+                    - 2*contour_qts.first.first.y*cluster_qts.first.first.y;
+                long long contour_cluster_dist_ms = fast_contour_cluster_sq_dist_sum/(cluster_size*contour_size);
+
+                std::vector<long long> avg_contour_col = {contour_qts.first.second[0]/contour_size, contour_qts.first.second[1]/contour_size, contour_qts.first.second[2]/contour_size};
+
+                long long fast_cluster_sq_dh_sum = cluster_qts.second.second[0]-2*cluster_qts.first.second[0]*avg_contour_col[0]+cluster_size*avg_contour_col[0]*avg_contour_col[0];//sum((h_i-m_h)^2) = sum(h_i^2) - 2*m_h*sum(h_i) + cluster_size*m_h^2
+                long long fast_cluster_sq_ds_sum = cluster_qts.second.second[1]-2*cluster_qts.first.second[1]*avg_contour_col[1]+cluster_size*avg_contour_col[1]*avg_contour_col[1];//same for s
+                long long fast_cluster_sq_dv_sum = cluster_qts.second.second[2]-2*cluster_qts.first.second[2]*avg_contour_col[2]+cluster_size*avg_contour_col[2]*avg_contour_col[2];//same for v
+
+                long long cluster_dh_ms = fast_cluster_sq_dh_sum/cluster_size;
+                long long cluster_ds_ms = fast_cluster_sq_ds_sum/cluster_size;
+                long long cluster_dv_ms = fast_cluster_sq_dv_sum/cluster_size;
+
+                long long pair_cost = m_cluster_contour_distance_weight*contour_cluster_dist_ms\
+                    + m_cluster_contour_color_weights[0]*cluster_dh_ms\
+                    + m_cluster_contour_color_weights[1]*cluster_ds_ms\
+                    + m_cluster_contour_color_weights[2]*cluster_dv_ms\
+                    - m_cluster_contour_size_weight*cluster_size*contour_size\
+                    + contour_cost;
+                if (min_pair_cost == -1 || pair_cost < min_pair_cost){
+                    min_pair_cost = pair_cost;
+                    opt_contour_id = contour;
+                    opt_cluster_id = cluster;
+                }
+            }
+        }
+
+        auto refined_pcl_segments = all_seaing_interfaces::msg::LabeledObjectPointCloud();
+        refined_pcl_segments.time = in_cloud_msg->header.stamp;
+        refined_pcl_segments.label = bbox.label;
+        pcl::PointCloud<pcl::PointXYZHSV>::Ptr refined_cloud_ptr(new pcl::PointCloud<pcl::PointXYZHSV>());
+        refined_cloud_ptr->header = pcloud_ptr->header;
+        cv::Mat refined_obj_contour_mat = cv::Mat::zeros(img_sz, CV_8UC3);
+        for (cv::Point image_pt : in_contours[opt_contour_id]){
+            refined_obj_contour_mat.at<cv::Vec3b>(image_pt) = cv::Vec3b(255,255,255);
+            mat_opt_contour.at<cv::Vec3b>(image_pt-bbox_offset) = int_to_bgr(opt_contour_id, in_contours.size());
+        }
+        for (pcl::index_t ind : clusters_indices[opt_cluster_id].indices){
+            pcl::PointXYZHSV pt = pcloud_ptr->points[ind];
+            refined_cloud_ptr->push_back(pt);
+            cv::Point2d cloud_pt_xy = m_cam_model.project3dToPixel(cv::Point3d(pt.y, pt.z, -pt.x));
+            mat_opt_cluster.at<cv::Vec3b>((cv::Point)cloud_pt_xy-bbox_offset) = int_to_bgr(opt_cluster_id, clusters_indices.size());
+        }
+        pcl::toROSMsg(*refined_cloud_ptr, refined_pcl_segments.cloud);
+        refined_pcl_segments.cloud.header.stamp = in_cloud_msg->header.stamp;
+        cv_bridge::CvImagePtr refined_obj_contour_ptr(new cv_bridge::CvImage(in_img_msg->header, sensor_msgs::image_encodings::TYPE_8UC3, refined_obj_contour_mat));
+        // cv::imshow("Object contour image to be published:", refined_obj_contour_mat);
+        // cv::waitKey();
+        refined_pcl_segments.segment = *refined_obj_contour_ptr->toImageMsg();
+        refined_objects_pub.objects.push_back(refined_pcl_segments);
+        refined_cloud_contour_vec.push_back(std::make_pair(*refined_cloud_ptr,in_contours[opt_contour_id]));
+        // max_refined_len = std::max(max_refined_len, (int)refined_cloud_ptr->size());
+
+        //show clusters & contours & respective matching (split image)
+        // cv::Mat clust_cont_arr[] = {mat_clusters, mat_contours};
+        // cv::Mat opt_clust_cont_arr[] = {mat_opt_cluster, mat_opt_contour};
+        // cv::Mat clust_cont;
+        // cv::Mat opt_clust_cont;
+        // cv::hconcat(clust_cont_arr, 2, clust_cont);
+        // cv::hconcat(opt_clust_cont_arr, 2, opt_clust_cont);
+        // cv::Mat clust_cont_all_arr[] = {clust_cont, opt_clust_cont};
+        // cv::Mat clust_cont_all;
+        // cv::vconcat(clust_cont_all_arr, 2, clust_cont_all);
+        // cv::Mat upscaled;
+        // cv::resize(clust_cont_all, upscaled, cv::Size(), 5, 5, cv::INTER_CUBIC);
+        // cv::imshow("Clusters & contours & matching", upscaled);
+        // cv::waitKey();
+    }
+    RCLCPP_DEBUG(this->get_logger(), "WILL NOW SEND REFINED OBJECT POINT CLOUDS & CONTOURS");
+    m_refined_object_pcl_segment_pub->publish(refined_objects_pub);
+    RCLCPP_DEBUG(this->get_logger(), "PUBLISHED REFINED OBJECT POINT CLOUDS & CONTOURS");
+    pcl::PointCloud<pcl::PointXYZHSV>::Ptr all_obj_refined_pcls_ptr(new pcl::PointCloud<pcl::PointXYZHSV>());
+    all_obj_refined_pcls_ptr->header = in_cloud_tf_ptr->header;
+    //convert vector of PointCloud to a single PointCloud with channels
+    // all_obj_refined_pcls_ptr->resize((pcl::uindex_t)max_refined_len, (pcl::uindex_t)refined_cloud_contour_vec.size());
+    cv::Mat all_obj_refined_contours = cv::Mat::zeros(cv_ptr->image.size(), CV_8UC3);
+    try{
+        for(int i = 0; i < (int)refined_cloud_contour_vec.size(); i++){
+            RCLCPP_DEBUG(this->get_logger(), "BBOX %d/%d", i, (int)refined_cloud_contour_vec.size());
+            for(int j = 0; j < (int)refined_cloud_contour_vec[i].first.size(); j++){
+                // all_obj_refined_pcls_ptr->at(j,i) = refined_cloud_contour_vec[i].first[j];
+                all_obj_refined_pcls_ptr->push_back(refined_cloud_contour_vec[i].first[j]);
+            }
+            for(cv::Point pt : refined_cloud_contour_vec[i].second){
+                all_obj_refined_contours.at<cv::Vec3b>(pt)=cv::Vec3b(255, 255, 255);
+            }
+        }
+    }catch(std::exception &ex){
+        RCLCPP_ERROR(this->get_logger(), "REFINED POINT CLOUD PUBLISHING ERROR: %s", ex.what());
+    }
+    RCLCPP_DEBUG(this->get_logger(), "STORED CONTOURS TO BE PUBLISHED");
+    auto obj_refined_pcls_msg = sensor_msgs::msg::PointCloud2();
+    pcl::toROSMsg(*all_obj_refined_pcls_ptr, obj_refined_pcls_msg);
+    obj_refined_pcls_msg.header.stamp = in_cloud_msg->header.stamp;
+    m_refined_object_pcl_viz_pub->publish(obj_refined_pcls_msg);
+    cv_bridge::CvImagePtr all_obj_refined_contour_ptr(new cv_bridge::CvImage(in_img_msg->header, sensor_msgs::image_encodings::TYPE_8UC3, all_obj_refined_contours));
+    // cv::imshow("Object contour image to be published:", all_obj_refined_contours);
+    // cv::waitKey();
+    m_refined_object_segment_viz_pub->publish(*all_obj_refined_contour_ptr->toImageMsg());
+    RCLCPP_DEBUG(this->get_logger(), "SENT OBJECT POINT CLOUDS FOR VISUALIZATION");
+}
+
+geometry_msgs::msg::TransformStamped BBoxProjectPCloud::get_tf(const std::string &in_target_frame,
+                                                               const std::string &in_src_frame) {
+    geometry_msgs::msg::TransformStamped tf;
+    m_pc_cam_tf_ok = false;
+    try {
+        tf = m_tf_buffer->lookupTransform(in_target_frame, in_src_frame, tf2::TimePointZero);
+        m_pc_cam_tf_ok = true;
+        RCLCPP_INFO(this->get_logger(), "LiDAR to Camera Transform good");
+        RCLCPP_INFO(this->get_logger(), "in_target_frame: %s, in_src_frame: %s",
+                    in_target_frame.c_str(), in_src_frame.c_str());
+    } catch (tf2::TransformException &ex) {
+        RCLCPP_ERROR(this->get_logger(), "%s", ex.what());
+    }
+    return tf;
+}
+
+BBoxProjectPCloud::~BBoxProjectPCloud() {}
+
+int main(int argc, char *argv[]) {
+    rclcpp::init(argc, argv);
+    rclcpp::spin(std::make_shared<BBoxProjectPCloud>());
+    rclcpp::shutdown();
+    return 0;
+}
\ No newline at end of file
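The pair cost in bb_pcl_project is evaluated in O(1) per cluster-contour pair because each set keeps running sums and sums of squares, using the standard expansion of the total squared distance between two point sets. A self-contained sketch of that identity (names are illustrative, not from this PR):

```cpp
// Sketch: the sum/sum-of-squares expansion behind fast_contour_cluster_sq_dist_sum.
// Per coordinate, for point sets {p_i} (n points) and {q_j} (m points):
//   sum_ij (p_i - q_j)^2 = m*sum(p_i^2) + n*sum(q_j^2) - 2*sum(p_i)*sum(q_j)
#include <cassert>
#include <vector>

double mean_sq_dist_1d(const std::vector<double> &p, const std::vector<double> &q) {
    double sp = 0, sp2 = 0, sq = 0, sq2 = 0;
    for (double x : p) { sp += x; sp2 += x * x; }
    for (double x : q) { sq += x; sq2 += x * x; }
    double n = p.size(), m = q.size();
    // One O(n + m) pass instead of the naive O(n * m) double loop.
    return (m * sp2 + n * sq2 - 2 * sp * sq) / (n * m);
}

int main() {
    std::vector<double> p{0, 2}, q{1};
    // (0-1)^2 + (2-1)^2 = 2 over 2 pairs -> mean of 1.0
    assert(mean_sq_dist_1d(p, q) == 1.0);
    return 0;
}
```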
diff --git a/all_seaing_perception/all_seaing_perception/color_segmentation.py b/all_seaing_perception/all_seaing_perception/color_segmentation.py
index c0d573ef..374e924b 100755
--- a/all_seaing_perception/all_seaing_perception/color_segmentation.py
+++ b/all_seaing_perception/all_seaing_perception/color_segmentation.py
@@ -46,10 +46,10 @@ def img_callback(self, img):
         bboxes.header = img.header
 
         try:
-            img = self.bridge.imgmsg_to_cv2(img, "rgb8")
+            img = self.bridge.imgmsg_to_cv2(img, "bgr8")
         except cv_bridge.CvBridgeError as e:
             self.get_logger().info(str(e))
 
-        hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
+        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
         colors = self.colors
         label_dict = self.label_dict
@@ -104,7 +104,7 @@ def img_callback(self, img):
                 4,
             )
 
-        self.img_pub.publish(self.bridge.cv2_to_imgmsg(img, "rgb8"))
+        self.img_pub.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
         self.bbox_pub.publish(bboxes)
 
 
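The rgb8-to-bgr8 change above matters because the conversion code passed to cv2.cvtColor encodes an assumed channel order: requesting rgb8 from cv_bridge and then converting with a BGR2HSV code swaps red and blue and shifts every hue, silently breaking the HSV thresholds configured in the YAML files. A standalone illustration of the failure mode (not project code):

```cpp
// Sketch: why the cv_bridge encoding must match the cvtColor conversion code.
// A pure-red pixel converted with the wrong channel order comes out as blue's hue.
#include <opencv2/imgproc.hpp>
#include <cstdio>

int main() {
    cv::Mat bgr(1, 1, CV_8UC3, cv::Vec3b(0, 0, 255)); // red, stored in BGR order
    cv::Mat hsv_ok, hsv_swapped;
    cv::cvtColor(bgr, hsv_ok, cv::COLOR_BGR2HSV);      // correct: H = 0 (red)
    cv::cvtColor(bgr, hsv_swapped, cv::COLOR_RGB2HSV); // treats data as RGB: H = 120 (blue)
    std::printf("correct H: %d, swapped H: %d\n",
                hsv_ok.at<cv::Vec3b>(0, 0)[0], hsv_swapped.at<cv::Vec3b>(0, 0)[0]);
    return 0;
}
```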
"obstacle_map/refined_tracked", 10); + m_object_sub = this->create_subscription( + "refined_object_point_clouds_segments", rclcpp::SensorDataQoS(), + std::bind(&ObjectTrackingMap::object_track_map_publish, this, std::placeholders::_1)); + m_odom_sub = this->create_subscription( + "odometry/filtered", 10, + std::bind(&ObjectTrackingMap::odom_callback, this, std::placeholders::_1)); + m_image_intrinsics_sub = this->create_subscription( + "camera_info_topic", 10, std::bind(&ObjectTrackingMap::intrinsics_cb, this, std::placeholders::_1)); +} + +ObjectCloud::ObjectCloud(rclcpp::Time t, int l, pcl::PointCloud::Ptr loc, pcl::PointCloud::Ptr glob){ + id = -1; + label = l; + time_seen = t; + last_dead = rclcpp::Time(0); + time_dead = rclcpp::Duration(0,0); + is_dead = false; + global_pcloud_ptr = glob; + for(pcl::PointXYZHSV &global_pt : global_pcloud_ptr->points){ + global_centroid.x += global_pt.x/((double)global_pcloud_ptr->points.size()); + global_centroid.y += global_pt.y/((double)global_pcloud_ptr->points.size()); + global_centroid.z += global_pt.z/((double)global_pcloud_ptr->points.size()); + } + this->update_loc_pcloud(loc); +} + +void ObjectCloud::update_loc_pcloud(pcl::PointCloud::Ptr loc){ + local_pcloud_ptr = loc; + for(pcl::PointXYZHSV &local_pt : local_pcloud_ptr->points){ + local_centroid.x += local_pt.x/((double)local_pcloud_ptr->points.size()); + local_centroid.y += local_pt.y/((double)local_pcloud_ptr->points.size()); + local_centroid.z += local_pt.z/((double)local_pcloud_ptr->points.size()); + } +} + +void ObjectTrackingMap::odom_callback(const nav_msgs::msg::Odometry &msg) { + m_nav_x = msg.pose.pose.position.x; + m_nav_y = msg.pose.pose.position.y; + tf2::Quaternion q; + q.setW(msg.pose.pose.orientation.w); + q.setX(msg.pose.pose.orientation.x); + q.setY(msg.pose.pose.orientation.y); + q.setZ(msg.pose.pose.orientation.z); + tf2::Matrix3x3 m(q); + double r, p, y; + m.getRPY(r, p, y); + m_nav_heading = y; + + m_nav_pose = msg.pose.pose; +} + +void ObjectTrackingMap::intrinsics_cb(const sensor_msgs::msg::CameraInfo &info_msg) { + RCLCPP_DEBUG(this->get_logger(), "GOT CAMERA INFO"); + m_cam_model.fromCameraInfo(info_msg); +} + +geometry_msgs::msg::TransformStamped ObjectTrackingMap::get_tf(const std::string &in_target_frame, + const std::string &in_src_frame) { + geometry_msgs::msg::TransformStamped tf; + m_pc_cam_tf_ok = false; + try { + tf = m_tf_buffer->lookupTransform(in_target_frame, in_src_frame, tf2::TimePointZero); + m_pc_cam_tf_ok = true; + RCLCPP_INFO(this->get_logger(), "LiDAR to Camera Transform good"); + RCLCPP_INFO(this->get_logger(), "in_target_frame: %s, in_src_frame: %s", + in_target_frame.c_str(), in_src_frame.c_str()); + } catch (tf2::TransformException &ex) { + RCLCPP_ERROR(this->get_logger(), "%s", ex.what()); + } + return tf; +} + +template +T ObjectTrackingMap::convert_to_global(double nav_x, double nav_y, double nav_heading, T point) { + T new_point = point;//to keep color-related data + double magnitude = std::hypot(point.x, point.y); + double point_angle = std::atan2(point.y, point.x); + new_point.x = nav_x + std::cos(nav_heading + point_angle) * magnitude; + new_point.y = nav_y + std::sin(nav_heading + point_angle) * magnitude; + new_point.z = 0; + return new_point; +} + +//TODO: Check it actually works, individually from the other parts +template +T ObjectTrackingMap::convert_to_local(double nav_x, double nav_y, double nav_heading, T point) { + T new_point = point;//to keep color-related data + double diff_x = point.x - nav_x; + double diff_y = point.y - 
nav_y; + double magnitude = std::hypot(diff_x, diff_y); + double point_angle = std::atan2(diff_y, diff_x); + new_point.x = std::cos(point_angle - nav_heading) * magnitude; + new_point.y = std::sin(point_angle - nav_heading) * magnitude; + new_point.z = 0; + return new_point; +} + +void ObjectTrackingMap::object_track_map_publish(const all_seaing_interfaces::msg::LabeledObjectPointCloudArray::ConstSharedPtr &msg){ + if(msg->objects.size()==0) return; + + //compute global coordinates from camera frame point cloud detections + std::vector> detected_obstacles; + for(all_seaing_interfaces::msg::LabeledObjectPointCloud obj : msg->objects){ + pcl::PointCloud::Ptr local_obj_pcloud(new pcl::PointCloud); + pcl::PointCloud::Ptr global_obj_pcloud(new pcl::PointCloud); + pcl::fromROSMsg(obj.cloud, *local_obj_pcloud); + for(pcl::PointXYZHSV &pt : local_obj_pcloud->points){ + pcl::PointXYZHSV global_pt = this->convert_to_global(m_nav_x, m_nav_y, m_nav_heading, pt); + global_obj_pcloud->push_back(global_pt); + } + std::shared_ptr obj_cloud (new ObjectCloud(obj.time, obj.label, local_obj_pcloud, global_obj_pcloud)); + detected_obstacles.push_back(obj_cloud); + } + + // Make and publish untracked map + std::vector> untracked_obs; + std::vector untracked_labels; + for(std::shared_ptr det_obs : detected_obstacles){ + pcl::PointCloud::Ptr raw_cloud(new pcl::PointCloud); + for(pcl::PointXYZHSV pt : det_obs->local_pcloud_ptr->points){ + pcl::PointXYZRGB rgb_pt; + pcl::PointXYZI i_pt; + pcl::PointXYZHSVtoXYZRGB(pt, rgb_pt); + pcl::PointXYZRGBtoXYZI(rgb_pt, i_pt); + raw_cloud->push_back(i_pt); + } + std::vector ind(raw_cloud->size()); + std::iota (std::begin(ind), std::end(ind), 0); + std::shared_ptr untracked_ob( + new all_seaing_perception::Obstacle(raw_cloud, ind, m_obstacle_id++, + det_obs->time_seen, m_nav_x, m_nav_y, m_nav_heading)); + untracked_labels.push_back(det_obs->label); + untracked_obs.push_back(untracked_ob); + } + this->publish_map(msg->objects[0].cloud.header, "untracked", true, untracked_obs, m_untracked_map_pub, untracked_labels); + + /* + OBJECT TRACKING: + --Like in obstacle_detector.cpp, for each detected obstacle, find the closest (RMS point distance? 
or just centroids), + same-color, yet unmatched, detected obstacle, and assign it to it --> if above distance threshold for all mark it as new + --Update matched obstacles and add new ones (and assign them id) + --From each tracked & unmatched obstacle, check if it's in the robot's FoV --> those are the only ones that may be detected at that point + (subscribe to the camera_info topic and get the transform from LiDAR 3D to camera 2D plane and min/max image x,y, + project the local centroid (from its global centroid and get_local()-->also update it in the tracked obstacles) + of the obstacle onto the camera image plane and check if it's within the bounds, with some margin) + --For those objects, update the time they have been undetected in the FoV total (add time from prev appearance, time_seen, if it was in FoV and undetected then too) + --For unmatched obstacles that have been undetected in FoV for > some time threshold, remove them + */ + + // Match new obstacles with old ones, update them, and add new ones + std::unordered_set chosen_indices; + for(std::shared_ptr det_obs : detected_obstacles){ + float best_dist = m_obstacle_seg_thresh; + int best_match = -1; + for(int tracked_id = 0; tracked_id < m_tracked_obstacles.size(); tracked_id++){ + if (chosen_indices.count(tracked_id) || m_tracked_obstacles[tracked_id]->label != det_obs->label) + continue; + float curr_dist = pcl::euclideanDistance(m_tracked_obstacles[tracked_id]->global_centroid, + det_obs->global_centroid); + if (curr_dist < best_dist) { + best_match = tracked_id; + best_dist = curr_dist; + } + } + if(best_match >= 0){ + det_obs->id = m_tracked_obstacles[best_match]->id; + m_tracked_obstacles[best_match] = det_obs; + chosen_indices.insert(best_match); + }else{ + det_obs->id = m_obstacle_id++; + m_tracked_obstacles.push_back(det_obs); + } + } + + // Filter old obstacles + for (int tracked_id = 0; tracked_id < m_tracked_obstacles.size(); tracked_id++) { + if(chosen_indices.count(tracked_id)) continue; + // Update local points + pcl::PointCloud::Ptr upd_local_obj_pcloud(new pcl::PointCloud); + for(pcl::PointXYZHSV &global_pt : m_tracked_obstacles[tracked_id]->global_pcloud_ptr->points){ + upd_local_obj_pcloud->push_back(this->convert_to_local(m_nav_x, m_nav_y, m_nav_heading, global_pt)); + } + m_tracked_obstacles[tracked_id]->update_loc_pcloud(upd_local_obj_pcloud); + // Check if in FoV (which'll mean it's dead, at least temporarily) + geometry_msgs::msg::Point lidar_point; + lidar_point.x = m_tracked_obstacles[tracked_id]->local_centroid.x; + lidar_point.y = m_tracked_obstacles[tracked_id]->local_centroid.y; + lidar_point.z = m_tracked_obstacles[tracked_id]->local_centroid.z; + geometry_msgs::msg::Point camera_point = lidar_point;//ALREADY IN THE SAME FRAME, WAS TRANSFORMED BEFORE BEING PUBLISHED BY bbox_project_pcloud.cpp + cv::Point2d xy_rect = m_cam_model.project3dToPixel( + cv::Point3d(camera_point.y, camera_point.z, -camera_point.x)); + RCLCPP_DEBUG(this->get_logger(), "OBSTACLE ID %d (%lf, %lf, %lf)->(%lf, %lf)", m_tracked_obstacles[tracked_id]->id, lidar_point.x, lidar_point.y, lidar_point.z, xy_rect.x, xy_rect.y); + if ((xy_rect.x >= 0) && (xy_rect.x < m_cam_model.cameraInfo().width) && (xy_rect.y >= 0) && + (xy_rect.y < m_cam_model.cameraInfo().height) && (lidar_point.x >= 0)) { + // Dead + if(m_tracked_obstacles[tracked_id]->is_dead){ + // Was also dead before, add time dead + RCLCPP_DEBUG(this->get_logger(), "OBSTACLE ID %d TIME PERIOD FROM PREVIOUS DEAD: %lf - %lf", m_tracked_obstacles[tracked_id]->id, 
m_tracked_obstacles[tracked_id]->last_dead.seconds(), rclcpp::Time(msg->objects[0].time).seconds()); + m_tracked_obstacles[tracked_id]->time_dead = rclcpp::Time(msg->objects[0].time) - m_tracked_obstacles[tracked_id]->last_dead + m_tracked_obstacles[tracked_id]->time_dead; + RCLCPP_DEBUG(this->get_logger(), "OBSTACLE ID %d DEAD FOR %lf SECONDS, OBSTACLE DROP THRESHOLD: %lf", m_tracked_obstacles[tracked_id]->id, m_tracked_obstacles[tracked_id]->time_dead.seconds(), m_obstacle_drop_thresh); + if(m_tracked_obstacles[tracked_id]->time_dead.seconds() > m_obstacle_drop_thresh){ + RCLCPP_DEBUG(this->get_logger(), "OBSTACLE ID %d DROPPED", m_tracked_obstacles[tracked_id]->id); + m_tracked_obstacles.erase(m_tracked_obstacles.begin()+tracked_id); + tracked_id--; + continue; + } + } + m_tracked_obstacles[tracked_id]->is_dead = true; + m_tracked_obstacles[tracked_id]->last_dead = msg->objects[0].time; + }else{ + m_tracked_obstacles[tracked_id]->is_dead = false; + } + } + + // Publish map with tracked obstacles + std::vector> tracked_obs; + std::vector tracked_labels; + for(std::shared_ptr t_ob : m_tracked_obstacles){ + pcl::PointCloud::Ptr tracked_cloud(new pcl::PointCloud); + for(pcl::PointXYZHSV pt : t_ob->local_pcloud_ptr->points){ + pcl::PointXYZRGB rgb_pt; + pcl::PointXYZI i_pt; + pcl::PointXYZHSVtoXYZRGB(pt, rgb_pt); + pcl::PointXYZRGBtoXYZI(rgb_pt, i_pt); + tracked_cloud->push_back(i_pt); + } + std::vector ind(tracked_cloud->size()); + std::iota (std::begin(ind), std::end(ind), 0); + std::shared_ptr tracked_ob( + new all_seaing_perception::Obstacle(tracked_cloud, ind, t_ob->id, + t_ob->time_seen, m_nav_x, m_nav_y, m_nav_heading)); + tracked_labels.push_back(t_ob->label); + tracked_obs.push_back(tracked_ob); + } + this->publish_map(msg->objects[0].cloud.header, "tracked", true, tracked_obs, m_tracked_map_pub, tracked_labels); +} + +void ObjectTrackingMap::publish_map( + std_msgs::msg::Header local_header, std::string ns, bool is_labeled, + const std::vector> &map, + rclcpp::Publisher::SharedPtr pub, std::vector labels) { + + // Create global header + std_msgs::msg::Header global_header = std_msgs::msg::Header(); + global_header.frame_id = m_global_frame_id; + global_header.stamp = local_header.stamp; + + all_seaing_interfaces::msg::ObstacleMap map_msg; + map_msg.ns = ns; + map_msg.local_header = local_header; + map_msg.header = global_header; + map_msg.is_labeled = is_labeled; + for (unsigned int i = 0; i < map.size(); i++) { + all_seaing_interfaces::msg::Obstacle det_obstacle; + map[i]->to_ros_msg(local_header, global_header, det_obstacle); + if(is_labeled){ + det_obstacle.label = labels[i]; + } + map_msg.obstacles.push_back(det_obstacle); + } + map_msg.pose = m_nav_pose; + pub->publish(map_msg); +} + +ObjectTrackingMap::~ObjectTrackingMap() {} + +int main(int argc, char *argv[]) { + rclcpp::init(argc, argv); + rclcpp::spin(std::make_shared()); + rclcpp::shutdown(); + return 0; +} \ No newline at end of file diff --git a/all_seaing_perception/include/all_seaing_perception/bbox_project_pcloud.hpp b/all_seaing_perception/include/all_seaing_perception/bbox_project_pcloud.hpp new file mode 100644 index 00000000..9dfb5213 --- /dev/null +++ b/all_seaing_perception/include/all_seaing_perception/bbox_project_pcloud.hpp @@ -0,0 +1,133 @@ +#ifndef ALL_SEAING_PERCEPTION__BBOX_PROJECT_PCLOUD_HPP +#define ALL_SEAING_PERCEPTION__BBOX_PROJECT_PCLOUD_HPP + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "yaml-cpp/yaml.h" + +#include 
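The tracker in object_tracking_map.cpp is a greedy same-label nearest-centroid matcher: each detection claims the closest unclaimed track of the same color within obstacle_seg_thresh, and anything unclaimed becomes a new track. A stripped-down sketch of just the matching step (the Track/Detection types are illustrative stand-ins for the PR's ObjectCloud, not its API):

```cpp
// Sketch: greedy same-label nearest-centroid association, as used above.
#include <cmath>
#include <unordered_set>
#include <vector>

struct Centroid { double x, y; };
struct Track { int id; int label; Centroid c; };
struct Detection { int label; Centroid c; };

// Returns, for each detection, the index of the matched track or -1 (new object).
std::vector<int> match(const std::vector<Track> &tracks,
                       const std::vector<Detection> &dets, double seg_thresh) {
    std::vector<int> assignment(dets.size(), -1);
    std::unordered_set<int> used; // each track is matched at most once
    for (size_t d = 0; d < dets.size(); d++) {
        double best = seg_thresh; // distances beyond the threshold spawn new tracks
        for (size_t t = 0; t < tracks.size(); t++) {
            if (used.count(t) || tracks[t].label != dets[d].label) continue;
            double dist = std::hypot(tracks[t].c.x - dets[d].c.x,
                                     tracks[t].c.y - dets[d].c.y);
            if (dist < best) { best = dist; assignment[d] = (int)t; }
        }
        if (assignment[d] >= 0) used.insert(assignment[d]);
    }
    return assignment;
}
```

Note that this greedy pass is order-dependent (an earlier detection can claim the track that a later one fits better); a global assignment would need something like the Hungarian algorithm, which the PR does not attempt.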
"rclcpp/rclcpp.hpp" + +#include "tf2_ros/buffer.h" +#include "tf2_ros/transform_listener.h" + +#include "image_geometry/pinhole_camera_model.h" +#include "message_filters/subscriber.h" +#include "message_filters/sync_policies/approximate_time.h" +#include "message_filters/synchronizer.h" + +#include "geometry_msgs/msg/transform_stamped.hpp" +#include "sensor_msgs/msg/camera_info.hpp" +#include "sensor_msgs/msg/image.hpp" +#include "sensor_msgs/msg/point_cloud2.hpp" +#include "builtin_interfaces/msg/time.hpp" + +#include +#include +#include +#include +#include "pcl_conversions/pcl_conversions.h" +#include +#include +#include + +#include "cv_bridge/cv_bridge.h" + +#include "tf2_geometry_msgs/tf2_geometry_msgs.hpp" +#include "tf2_sensor_msgs/tf2_sensor_msgs.hpp" + +#include "all_seaing_interfaces/msg/labeled_bounding_box2_d_array.hpp" +#include "all_seaing_interfaces/msg/labeled_bounding_box2_d.hpp" +#include "all_seaing_interfaces/msg/labeled_object_point_cloud_array.hpp" +#include "all_seaing_interfaces/msg/labeled_object_point_cloud.hpp" + +#include +#include + +class BBoxProjectPCloud : public rclcpp::Node{ +private: + // Publishers and subscribers + rclcpp::Publisher::SharedPtr m_object_pcl_pub; + rclcpp::Publisher::SharedPtr m_object_pcl_viz_pub; + rclcpp::Publisher::SharedPtr m_refined_object_pcl_segment_pub; + rclcpp::Publisher::SharedPtr m_refined_object_pcl_viz_pub; + rclcpp::Publisher::SharedPtr m_refined_object_segment_viz_pub; + rclcpp::Subscription::SharedPtr m_image_intrinsics_sub; + message_filters::Subscriber m_image_sub; + message_filters::Subscriber m_cloud_sub; + message_filters::Subscriber m_bbox_sub; + + // Transform variables + std::shared_ptr m_tf_listener{nullptr}; + std::unique_ptr m_tf_buffer; + geometry_msgs::msg::TransformStamped m_pc_cam_tf; + bool m_pc_cam_tf_ok; + + // Intrinsics callback camera model variables + image_geometry::PinholeCameraModel m_cam_model; + + // Pointcloud-camera sync policies + typedef message_filters::sync_policies::ApproximateTime + PointCloudCamBBoxPolicy; + typedef message_filters::Synchronizer PointCloudCamBBoxSync; + std::shared_ptr m_pc_cam_bbox_sync; + + // Get intrinsic camera model information needed for projection + void intrinsics_cb(const sensor_msgs::msg::CameraInfo &info_msg); + + // Get transform from source frame to target frame + geometry_msgs::msg::TransformStamped get_tf(const std::string &in_target_frame, + const std::string &in_src_frame); + + // Get the point cloud from the LiDAR, the image from the camera (only to get the camera frame), + // and the bounding boxes of the objects detected from the YOLOv8 (or other object detector/color segmentator) node, + // and output a set of point clouds representing the points included in the bounding box of each detected object + // (in 3D space, by projecting them onto the image and seeing if they are in the bounding box) + void bb_pcl_project( + const sensor_msgs::msg::Image::ConstSharedPtr &in_img_msg, + const sensor_msgs::msg::PointCloud2::ConstSharedPtr &in_cloud_msg, + const all_seaing_interfaces::msg::LabeledBoundingBox2DArray::ConstSharedPtr &in_bbox_msg); + + int m_obstacle_id; + + // for cluster extraction + int m_obstacle_size_min; + int m_obstacle_size_max; + double m_clustering_distance; + + // for color segmentation + std::string color_label_mappings_file; + YAML::Node label_config_yaml; + std::map label_color_map; + + // for cluster-contour matching/selection + std::string matching_weights_file; + std::string contour_matching_color_ranges_file; + + YAML::Node 
diff --git a/all_seaing_perception/include/all_seaing_perception/object_tracking_map.hpp b/all_seaing_perception/include/all_seaing_perception/object_tracking_map.hpp
new file mode 100644
index 00000000..eba40834
--- /dev/null
+++ b/all_seaing_perception/include/all_seaing_perception/object_tracking_map.hpp
@@ -0,0 +1,124 @@
+#ifndef ALL_SEAING_PERCEPTION__OBJECT_TRACKING_MAP_HPP
+#define ALL_SEAING_PERCEPTION__OBJECT_TRACKING_MAP_HPP
+
+#include <string>
+#include <vector>
+#include "yaml-cpp/yaml.h"
+
+#include <memory>
+
+#include "rclcpp/rclcpp.hpp"
+
+#include "tf2_ros/buffer.h"
+#include "tf2_ros/transform_listener.h"
+
+#include "tf2/LinearMath/Matrix3x3.h"
+#include "tf2/LinearMath/Quaternion.h"
+
+#include "image_geometry/pinhole_camera_model.h"
+#include "message_filters/subscriber.h"
+
+#include "geometry_msgs/msg/transform_stamped.hpp"
+#include "sensor_msgs/msg/camera_info.hpp"
+#include "sensor_msgs/msg/image.hpp"
+#include "sensor_msgs/msg/point_cloud2.hpp"
+#include "builtin_interfaces/msg/time.hpp"
+#include "nav_msgs/msg/odometry.hpp"
+#include "std_msgs/msg/header.hpp"
+
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+#include <pcl/point_types_conversion.h>
+#include <pcl/common/centroid.h>
+#include "pcl_conversions/pcl_conversions.h"
+
+#include "cv_bridge/cv_bridge.h"
+
+#include "tf2_geometry_msgs/tf2_geometry_msgs.hpp"
+#include "tf2_sensor_msgs/tf2_sensor_msgs.hpp"
+
+#include "tf2/exceptions.h"
+#include "tf2_ros/transform_listener.h"
+#include "tf2_ros/buffer.h"
+
+#include "all_seaing_interfaces/msg/labeled_object_point_cloud_array.hpp"
+#include "all_seaing_interfaces/msg/labeled_object_point_cloud.hpp"
+#include "all_seaing_interfaces/msg/obstacle_map.hpp"
+
+#include "all_seaing_perception/obstacle.hpp"
+
+#include <numeric>
+#include <algorithm>
+
+// Custom struct that also keeps the points themselves with the obstacle
+// (Obstacle doesn't store them, and we don't want to modify it)
+struct ObjectCloud{
+    int id;
+    int label;
+    rclcpp::Time time_seen;
+    rclcpp::Time last_dead;
+    rclcpp::Duration time_dead = rclcpp::Duration(0, 0);
+    bool is_dead;
+    pcl::PointCloud<pcl::PointXYZHSV>::Ptr local_pcloud_ptr;
+    pcl::PointCloud<pcl::PointXYZHSV>::Ptr global_pcloud_ptr;
+    pcl::PointXYZ local_centroid;
+    pcl::PointXYZ global_centroid;
+
+    ObjectCloud(rclcpp::Time t, int l, pcl::PointCloud<pcl::PointXYZHSV>::Ptr loc, pcl::PointCloud<pcl::PointXYZHSV>::Ptr glob);
+
+    void update_loc_pcloud(pcl::PointCloud<pcl::PointXYZHSV>::Ptr loc);
+};
+
+class ObjectTrackingMap : public rclcpp::Node{
+private:
+    // Publishes the map with the untracked (but labeled, with color) obstacles, and performs euclidean-distance (in the global frame)
+    // object tracking using the centroids of the object point clouds to publish the map with the tracked and labeled obstacles
+    void object_track_map_publish(const all_seaing_interfaces::msg::LabeledObjectPointCloudArray::ConstSharedPtr &msg);
+
+    void odom_callback(const nav_msgs::msg::Odometry &msg);
+    void publish_map(std_msgs::msg::Header local_header, std::string ns, bool is_labeled,
+                     const std::vector<std::shared_ptr<all_seaing_perception::Obstacle>> &map,
+                     rclcpp::Publisher<all_seaing_interfaces::msg::ObstacleMap>::SharedPtr pub, std::vector<int> labels);
+    template <typename T>
+    T convert_to_global(double nav_x, double nav_y, double nav_heading, T point);
+    template <typename T>
+    T convert_to_local(double nav_x, double nav_y, double nav_heading, T point);
+
+    // Get intrinsic camera model information needed for projection
+    void intrinsics_cb(const sensor_msgs::msg::CameraInfo &info_msg);
+
+    // Get transform from source frame to target frame
+    geometry_msgs::msg::TransformStamped get_tf(const std::string &in_target_frame,
+                                                const std::string &in_src_frame);
+
+    // Member variables
+    std::vector<std::shared_ptr<ObjectCloud>> m_tracked_obstacles;
+    std::string m_global_frame_id;
+    int m_obstacle_id;
+    double m_obstacle_seg_thresh;
+    double m_obstacle_drop_thresh;
+
+    float m_nav_x, m_nav_y, m_nav_heading;
+
+    geometry_msgs::msg::Pose m_nav_pose;
+
+    // Publishers and subscribers
+    rclcpp::Publisher<all_seaing_interfaces::msg::ObstacleMap>::SharedPtr m_untracked_map_pub;
+    rclcpp::Publisher<all_seaing_interfaces::msg::ObstacleMap>::SharedPtr m_tracked_map_pub;
+    rclcpp::Subscription<all_seaing_interfaces::msg::LabeledObjectPointCloudArray>::SharedPtr m_object_sub;
+    rclcpp::Subscription<nav_msgs::msg::Odometry>::SharedPtr m_odom_sub;
+    rclcpp::Subscription<sensor_msgs::msg::CameraInfo>::SharedPtr m_image_intrinsics_sub;
+
+    // Transform variables
+    std::shared_ptr<tf2_ros::TransformListener> m_tf_listener{nullptr};
+    std::unique_ptr<tf2_ros::Buffer> m_tf_buffer;
+    geometry_msgs::msg::TransformStamped m_pc_cam_tf;
+    bool m_pc_cam_tf_ok;
+
+    // Intrinsics callback camera model variables
+    image_geometry::PinholeCameraModel m_cam_model;
+public:
+    ObjectTrackingMap();
+    virtual ~ObjectTrackingMap();
+};
+
+#endif // ALL_SEAING_PERCEPTION__OBJECT_TRACKING_MAP_HPP
\ No newline at end of file
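The `convert_to_global`/`convert_to_local` templates are only declared here; their implementation lives in the .cpp and is not part of this hunk. Presumably they apply the 2D rigid transform given by the boat's pose, along these lines (an assumed sketch, not the node's actual code):

```cpp
// Plausible local<->global conversion (assumed, for illustration): rotate by
// the heading, then translate by the nav position. Works for any point type
// with public x/y fields, e.g. pcl::PointXYZ.
#include <cmath>

template <typename T>
T convert_to_global(double nav_x, double nav_y, double nav_heading, T point) {
    T global_point = point;
    global_point.x = nav_x + point.x * std::cos(nav_heading) - point.y * std::sin(nav_heading);
    global_point.y = nav_y + point.x * std::sin(nav_heading) + point.y * std::cos(nav_heading);
    return global_point;
}

template <typename T>
T convert_to_local(double nav_x, double nav_y, double nav_heading, T point) {
    T local_point = point;
    double dx = point.x - nav_x, dy = point.y - nav_y;
    // Inverse transform: translate back, then rotate by -heading
    local_point.x = dx * std::cos(nav_heading) + dy * std::sin(nav_heading);
    local_point.y = -dx * std::sin(nav_heading) + dy * std::cos(nav_heading);
    return local_point;
}
```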
diff --git a/all_seaing_perception/launch/bbox_project_pcloud_test.launch.py b/all_seaing_perception/launch/bbox_project_pcloud_test.launch.py
new file mode 100644
index 00000000..6227ac76
--- /dev/null
+++ b/all_seaing_perception/launch/bbox_project_pcloud_test.launch.py
@@ -0,0 +1,44 @@
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+from launch.actions import IncludeLaunchDescription
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+import launch_ros
+import os
+
+def generate_launch_description():
+    bringup_prefix = get_package_share_directory("all_seaing_bringup")
+    color_label_mappings = os.path.join(
+        bringup_prefix, "config", "perception", "color_label_mappings.yaml"
+    )
+    color_ranges = os.path.join(
+        bringup_prefix, "config", "perception", "color_ranges.yaml"
+    )
+    matching_weights = os.path.join(
+        bringup_prefix, "config", "perception", "matching_weights.yaml"
+    )
+    contour_matching_color_ranges = os.path.join(
+        bringup_prefix, "config", "perception", "contour_matching_color_ranges.yaml"
+    )
+    bbox_project_pcloud_node = launch_ros.actions.Node(
+        package="all_seaing_perception",
+        executable="bbox_project_pcloud",
+        output="screen",
+        remappings=[
+            ("camera_info_topic", "/wamv/sensors/cameras/front_left_camera_sensor/camera_info"),
+            ("camera_topic", "/wamv/sensors/cameras/front_left_camera_sensor/image_raw"),
+            ("lidar_topic", "point_cloud/filtered")
+        ],
+        parameters=[
+            {"bbox_object_margin": 0.0},
+            {"color_label_mappings_file": color_label_mappings},
+            {"color_ranges_file": color_ranges},
+            {"obstacle_size_min": 2},
+            {"obstacle_size_max": 60},
+            {"clustering_distance": 1.0},
+            {"matching_weights_file": matching_weights},
+            {"contour_matching_color_ranges_file": contour_matching_color_ranges}
+        ]
+    )
+    return LaunchDescription([
+        bbox_project_pcloud_node
+    ])
\ No newline at end of file
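The `*_file` parameters above point at the YAML configs added under `all_seaing_bringup/config/perception`. Inside the node they are presumably parsed with yaml-cpp, roughly as follows (`MatchingWeights` and `load_matching_weights` are illustrative names, not the node's actual code):

```cpp
// Sketch of loading matching_weights.yaml with yaml-cpp (assumed to mirror
// what bbox_project_pcloud does with its matching_weights_file parameter).
#include <string>
#include <vector>
#include "yaml-cpp/yaml.h"

struct MatchingWeights {
    double clustering_distance_weight;
    std::vector<double> clustering_color_weights;
    double clustering_color_thres;
};

MatchingWeights load_matching_weights(const std::string &path) {
    YAML::Node config = YAML::LoadFile(path);
    MatchingWeights w;
    w.clustering_distance_weight = config["clustering_distance_weight"].as<double>();
    // yaml-cpp converts YAML sequences like [1.0, 0.0, 0.0] to std::vector directly
    w.clustering_color_weights = config["clustering_color_weights"].as<std::vector<double>>();
    w.clustering_color_thres = config["clustering_color_thres"].as<double>();
    return w;
}
```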
diff --git a/all_seaing_perception/launch/object_tracking_map_test.launch.py b/all_seaing_perception/launch/object_tracking_map_test.launch.py
new file mode 100644
index 00000000..e4f8aeca
--- /dev/null
+++ b/all_seaing_perception/launch/object_tracking_map_test.launch.py
@@ -0,0 +1,26 @@
+from ament_index_python.packages import get_package_share_directory
+from launch import LaunchDescription
+from launch.actions import IncludeLaunchDescription
+from launch.launch_description_sources import PythonLaunchDescriptionSource
+import launch_ros
+import os
+
+def generate_launch_description():
+    bringup_prefix = get_package_share_directory("all_seaing_bringup")
+    object_tracking_map_node = launch_ros.actions.Node(
+        package="all_seaing_perception",
+        executable="object_tracking_map",
+        output="screen",
+        arguments=['--ros-args', '--log-level', 'debug'],
+        remappings=[
+            ("camera_info_topic", "/wamv/sensors/cameras/front_left_camera_sensor/camera_info"),
+        ],
+        parameters=[
+            {"global_frame_id": "odom"},
+            {"obstacle_seg_thresh": 10.0},
+            {"obstacle_drop_thresh": 1.0},
+        ]
+    )
+    return LaunchDescription([
+        object_tracking_map_node
+    ])
\ No newline at end of file
diff --git a/all_seaing_perception/launch/yolov8_detection.launch.py b/all_seaing_perception/launch/yolov8_detection.launch.py
deleted file mode 100644
index f371b883..00000000
--- a/all_seaing_perception/launch/yolov8_detection.launch.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from ament_index_python.packages import get_package_share_directory
-from launch import LaunchDescription
-from launch.actions import IncludeLaunchDescription
-from launch.launch_description_sources import PythonLaunchDescriptionSource
-
-"""
-Reference: https://github.com/mgonzs13/yolov8_ros
-
-Instructions:
-$ cd ~/arcturus/dev_ws/src/
-$ git clone https://github.com/mgonzs13/yolov8_ros.git
-$ pip install -r yolov8_ros/requirements.txt
-$ cd ~/arcturus/dev_ws/
-$ rosdep install --from-paths src --ignore-src -r -y
-$ colcon build --symlink-install
-$ source ~/arcturus/dev_ws/install/setup.bash
-$ source ~/arcturus/vrx_ws/install/setup.bash
-
-follow those instructions before running script
-
-Parameters to Yolov8 Launch
-    model: YOLOv8 model (default: yolov8m.pt)
-    tracker: tracker file (default: bytetrack.yaml)
-    device: GPU/CUDA (default: cuda:0)
-    enable: wether to start YOLOv8 enabled (default: True)
-    threshold: detection threshold (default: 0.5)
-    input_image_topic: camera topic of RGB images (default: /camera/rgb/image_raw)
-    image_reliability: reliability for the image topic: 0=system default, 1=Reliable, 2=Best Effort (default: 2)
-
-Ensure that the model parameter has the correct path to a yolov8 model
-and that input_image_topic subscribes to the correct topic (such as zed)
-"""
-
-
-def generate_launch_description():
-
-    yolov8_prefix = get_package_share_directory("yolov8_bringup")
-    model_share_path = get_package_share_directory("all_seaing_perception")
-
-    return LaunchDescription(
-        [
-            IncludeLaunchDescription(
-                PythonLaunchDescriptionSource(
-                    [yolov8_prefix, "/launch/yolov8.launch.py"]
-                ),
-                launch_arguments={
-                    "model": model_share_path + "/models/yolov8_roboboat_model.pt",
-                    "input_image_topic": "/webcam_image",
-                }.items(),
-            )
-        ]
-    )
diff --git a/all_seaing_rviz_plugins/CMakeLists.txt b/all_seaing_rviz_plugins/CMakeLists.txt
index 691fa792..6280375c 100644
--- a/all_seaing_rviz_plugins/CMakeLists.txt
+++ b/all_seaing_rviz_plugins/CMakeLists.txt
@@ -6,6 +6,10 @@ if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
 endif()
 
 find_package(ament_cmake_auto REQUIRED)
+find_package(ament_cmake_ros REQUIRED)
+find_package(pluginlib REQUIRED)
+find_package(rviz_common REQUIRED)
+find_package(all_seaing_interfaces REQUIRED)
 ament_auto_find_build_dependencies()
 
 include_directories(include)
@@ -35,4 +39,4 @@ ament_export_include_directories(include)
 ament_export_targets(export_all_seaing_rviz_plugins)
 pluginlib_export_plugin_description_file(rviz_common plugins_description.xml)
 
-ament_auto_package(INSTALL_TO_SHARE plugins_description.xml)
\ No newline at end of file
+ament_auto_package()
\ No newline at end of file