Commit b101b56
Authored Aug 10, 2021
Noetic depth align beta (#22)
* WIP ImageTransport
* Half working template
* Not working Template Specialization
* Changed enable_if type
* Working ImageTransport
* Restructuring
* Moved advertiser tag dispatchers to private member functions
* Changed args of advertiser
* Added ROS_DEBUG and WARN logging
* Added CameraInfo converter (WIP)
* Tweaked for boost::array
* Added overloading constructor for BridgePublisher
* Fixed conversion-to-Boost issues
* Modified warning on PublishThread
* Added rectification information
* Added CI for depthai-bridge (#18)
  - Added ClangFormat
  - Added CI for style check and build
* Added Crop Service
* Updated ROS type image publishers
1 parent cf50060 commit b101b56

18 files changed: +865 / -822 lines

‎.clang-format

+14

@@ -0,0 +1,14 @@
+BasedOnStyle: Google
+NamespaceIndentation: None
+BreakBeforeBinaryOperators: NonAssignment
+SpaceBeforeParens: Never
+IndentPPDirectives: BeforeHash
+AlignOperands: true
+DerivePointerAlignment: false
+PointerAlignment: Left
+BinPackArguments: false
+BinPackParameters: false
+AllowShortFunctionsOnASingleLine: Empty
+IndentWidth: 4
+ColumnLimit: 160
+AllowShortIfStatementsOnASingleLine: WithoutElse
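For a sense of what this configuration produces, here is a small illustrative C++ snippet (not part of the commit; the function and names are made up) formatted roughly as the rules above would format it: 4-space indentation, left-aligned pointers, no space before parentheses, and short if statements without an else kept on one line.

#include <vector>

// Hypothetical helper, shown only to illustrate the formatting rules above.
static void appendIfValid(std::vector<int>* values, int candidate) {
    if(candidate >= 0) values->push_back(candidate);  // short if without else stays on one line
    int* last = values->empty() ? nullptr : &values->back();  // PointerAlignment: Left
    if(last != nullptr) *last += 1;
}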

‎.github/workflows/examples.repos

+5

@@ -0,0 +1,5 @@
+repositories:
+  luxonis/depthai-ros-examples:
+    type: git
+    url: https://github.com/luxonis/depthai-ros-examples.git
+    version: noetic-examples-update

‎.github/workflows/main.workflow.yml

+115

@@ -0,0 +1,115 @@
+
+name: DepthAI ROS CI/CD
+
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - noetic-devel
+    tags:
+      - 'v*'
+  pull_request:
+    branches:
+      - noetic-devel
+
+jobs:
+
+  style:
+    runs-on: ubuntu-20.04
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Clang-Format lint
+
+        uses: DoozyX/clang-format-lint-action@v0.12
+        with:
+          source: '.'
+          extensions: 'h,hpp,c,cpp'
+          clangFormatVersion: 10
+
+  ROS1-build:
+    if: startsWith(github.ref, 'refs/heads/noetic')
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-18.04, ubuntu-20.04]
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Melodic build
+        if: matrix.os == 'ubuntu-18.04'
+        uses: ros-tooling/setup-ros@v0.2
+        with:
+          required-ros-distributions: melodic
+
+      - name: Noetic build
+        if: matrix.os == 'ubuntu-20.04'
+        uses: ros-tooling/setup-ros@v0.2
+        with:
+          required-ros-distributions: noetic
+      - name: Installing DepthAi Core
+        run: sudo wget -qO- https://raw.githubusercontent.com/luxonis/depthai-ros/noetic-devel/install_dependencies.sh | sudo bash
+
+      - name: Build depthai-bridge melodic
+        if: matrix.os == 'ubuntu-18.04'
+        uses: ros-tooling/action-ros-ci@v0.2
+        with:
+          # vcs-repo-file-url: "${{ github.workspace }}/.github/workflows/examples.repos"
+          # package-name: depthai_examples
+          target-ros1-distro: melodic
+          skip-tests: true
+
+      - name: Build depthai-bridge noetic
+        if: matrix.os == 'ubuntu-20.04'
+        uses: ros-tooling/action-ros-ci@v0.2
+        with:
+          # vcs-repo-file-url: "${{ github.workspace }}/.github/workflows/examples.repos"
+          # package-name: depthai_examples
+          target-ros1-distro: noetic
+          skip-tests: true
+
+
+  ROS2-build:
+    if: startsWith(github.ref, 'refs/heads/foxy')
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        ros_distribution: [foxy, galactic]
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: ${{ matrix.ros_distribution }} build
+        uses: ros-tooling/setup-ros@v0.2
+        with:
+          required-ros-distributions: ${{ matrix.ros_distribution }}
+
+      - name: Installing DepthAi Core
+        run: sudo wget -qO- https://raw.githubusercontent.com/luxonis/depthai-ros/noetic-devel/install_dependencies.sh | sudo bash
+
+      - name: Build depthai-bridge ${{ matrix.ros_distribution }}
+        uses: ros-tooling/action-ros-ci@v0.2
+        with:
+          # vcs-repo-file-url: "${{ github.workspace }}/.github/workflows/examples.repos"
+          # package-name: depthai_examples
+          target-ros1-distro: ${{ matrix.ros_distribution }}
+          skip-tests: true
+
+
+
+# build_:
+#   runs-on: ubuntu-18.04
+#   steps:
+#     - uses: actions/checkout@v2
+#     - uses: ros-tooling/setup-ros@v0.2
+#       with:
+#         required-ros-distributions: melodic
+#     - run: sudo wget -qO- https://raw.githubusercontent.com/luxonis/depthai-ros/noetic-devel/install_dependencies.sh | sudo bash
+#     - uses: ros-tooling/action-ros-ci@v0.2
+#       with:
+#         # vcs-repo-file-url: "${{ github.workspace }}/.github/workflows/examples.repos"
+#         # package-name: depthai_examples
+#         target-ros1-distro: melodic
+#         skip-tests: true

‎.gitignore

+3

@@ -0,0 +1,3 @@
+*.json
+.vscode
+devel

‎depthai_bridge/CMakeLists.txt

+26-191

@@ -1,32 +1,10 @@
 cmake_minimum_required(VERSION 3.0.2)
-
-# get_filename_component (DEPTHAI_CORE "${CMAKE_CURRENT_LIST_DIR}/../external/depthai-core" REALPATH)
-# include ("${DEPTHAI_CORE}/cmake/HunterGate.cmake")
-
 set (CMAKE_POSITION_INDEPENDENT_CODE ON)
-# set (HUNTER_CONFIGURATION_TYPES "Release" CACHE STRING "Hunter dependencies list of build configurations")
-
-# HunterGate (
-#   URL "https://github.com/cpp-pm/hunter/archive/v0.23.258.tar.gz"
-#   SHA1 "062a19ab13ce8dffa9a882b6ce3e43bdabdf75d3"
-#   FILEPATH ${DEPTHAI_CORE}/cmake/Hunter/config.cmake # Add depthai-core config (hunter limitation)
-# )
-
 
 project(depthai_bridge)
 set(CMAKE_CXX_STANDARD 14)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
-## Compile as C++11, supported in ROS Kinetic and newer
-# add_compile_options(-std=c++11)
-
-## Find catkin macros and libraries
-## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
-## is used, also find other catkin packages
-
-
-# find_package(ament_cmake QUIET)
-
 set(_opencv_version 4)
 find_package(OpenCV 4 QUIET COMPONENTS imgproc highgui)
 if(NOT OpenCV_FOUND)
@@ -36,77 +14,34 @@ if(NOT OpenCV_FOUND)
 endif()
 
 
-# if ( ament_cmake_FOUND )
-
-#   # Not adding -DUSING_ROS since xml_parsing.cpp hasn't been ported to ROS2
-
-#   find_package(depthai_ros_msgs REQUIRED)
-#   find_package(rclcpp REQUIRED)
-#   find_package(sensor_msgs REQUIRED)
-#   find_package(std_msgs REQUIRED)
-#   find_package(vision_msgs REQUIRED)
-
-#   message(STATUS "------------------------------------------")
-#   message(STATUS "Depthai bridge is being built using AMENT.")
-#   message(STATUS "------------------------------------------")
-
-#   list(APPEND DEPENDENCY_PUBLIC_LIBRARIES
-#     [depthai_ros_msgs,
-#     rclcpp,
-#     sensor_msgs,
-#     std_msgs,
-#     vision_msgs])
-
-#   set(BUILD_TOOL_INCLUDE_DIRS ${ament_INCLUDE_DIRS})
-
-# elseif( CATKIN_DEVEL_PREFIX OR CATKIN_BUILD_BINARY_PACKAGE)
-
-set(catkin_FOUND 1)
-# add_definitions( -DUSING_ROS )
-# message(STATUS "------------------------------------------")
-
-find_package(catkin REQUIRED COMPONENTS
-  camera_info_manager
-  depthai_ros_msgs
-  roscpp
-  sensor_msgs
-  std_msgs
-  stereo_msgs
-  vision_msgs
-)
-# find_package(GTest)
-find_package(Boost REQUIRED)
-
-message(STATUS "------------------------------------------")
-message(STATUS "Depthai bridge is being built using CATKIN.")
-message(STATUS "------------------------------------------")
-
-find_package(depthai CONFIG REQUIRED)
-# find_package(depthai REQUIRED PATHS "/home/sachin/Desktop/luxonis/depthai-core/build/install/lib/cmake")
-
-catkin_package(
-  INCLUDE_DIRS include
-  LIBRARIES ${PROJECT_NAME}
-  CATKIN_DEPENDS camera_info_manager depthai_ros_msgs roscpp sensor_msgs std_msgs stereo_msgs vision_msgs
-)
-
-# catkin_package(
-#   INCLUDE_DIRS include # do not include "3rdparty" here
-#   LIBRARIES ${BEHAVIOR_TREE_LIBRARY}
-#   CATKIN_DEPENDS roslib
-# )
-
-list(APPEND DEPENDENCY_PUBLIC_LIBRARIES ${catkin_LIBRARIES})
-set(BUILD_TOOL_INCLUDE_DIRS ${catkin_INCLUDE_DIRS})
-# endif()
-
-# set(DAI_LIBRARIES "/home/sachin/Desktop/luxonis/depthai-core/build/install/lib")
-# set(DAI_INCLUDE_LIBRARIES "/home/sachin/Desktop/luxonis/depthai-core/build/install/include")
+find_package(catkin REQUIRED COMPONENTS
+  cv_bridge
+  camera_info_manager
+  depthai_ros_msgs
+  image_transport
+  roscpp
+  sensor_msgs
+  stereo_msgs
+  std_msgs
+  vision_msgs
+)
+
+find_package(Boost REQUIRED)
+
+find_package(depthai CONFIG REQUIRED)
+
+catkin_package(
+  INCLUDE_DIRS include
+  LIBRARIES ${PROJECT_NAME}
+  CATKIN_DEPENDS depthai_ros_msgs camera_info_manager roscpp sensor_msgs std_msgs vision_msgs image_transport cv_bridge stereo_msgs
+)
+
+list(APPEND DEPENDENCY_PUBLIC_LIBRARIES ${catkin_LIBRARIES})
+set(BUILD_TOOL_INCLUDE_DIRS ${catkin_INCLUDE_DIRS})
 
 include_directories(
   include
   ${BUILD_TOOL_INCLUDE_DIRS}
-  # ${OpenCV_INCLUDE_DIRS}
   ${Boost_INCLUDE_DIRS}
   ${depthai}
 )
@@ -119,99 +54,19 @@ FILE(GLOB LIB_SRC
 add_library(${PROJECT_NAME}
   ${LIB_SRC}
 )
-message("----------------------------------------------------------------------")
-message("${CMAKE_MODULE_PATH}")
+
 add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS}
   ${catkin_EXPORTED_TARGETS}
 )
 
 target_link_libraries(${PROJECT_NAME}
   ${catkin_LIBRARIES}
   depthai::core
+  depthai::opencv
   opencv_imgproc
   opencv_highgui
 )
 
-# ament_export_include_directories(include)
-# ament_export_libraries(${PROJECT_NAME})
-# ament_export_dependencies(
-#   ${DEPENDENCY_PUBLIC_LIBRARIES}
-# )
-
-
-###################################
-## catkin specific configuration ##
-###################################
-## The catkin_package macro generates cmake config files for your package
-## Declare things to be passed to dependent projects
-## INCLUDE_DIRS: uncomment this if your package contains header files
-## LIBRARIES: libraries you create in this project that dependent projects also need
-## CATKIN_DEPENDS: catkin_packages dependent projects also need
-## DEPENDS: system dependencies of this project that dependent projects also need
-
-###########
-## Build ##
-###########
-
-## Specify additional locations of header files
-## Your package locations should be listed before other locations
-# include_directories(
-#   include
-#   ${catkin_INCLUDE_DIRS}
-# )
-
-## Declare a C++ library
-# add_library(${PROJECT_NAME}
-#   src/${PROJECT_NAME}/depthai_bridge.cpp
-# )
-
-## Add cmake target dependencies of the library
-## as an example, code may need to be generated before libraries
-## either from message generation or dynamic reconfigure
-# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
-
-## Declare a C++ executable
-## With catkin_make all packages are built within a single CMake context
-## The recommended prefix ensures that target names across packages don't collide
-# add_executable(${PROJECT_NAME}_node src/depthai_bridge_node.cpp)
-
-## Rename C++ executable without prefix
-## The above recommended prefix causes long target names, the following renames the
-## target back to the shorter version for ease of user use
-## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
-# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
-
-## Add cmake target dependencies of the executable
-## same as for the library above
-# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
-
-## Specify libraries to link a library or executable target against
-# target_link_libraries(${PROJECT_NAME}_node
-#   ${catkin_LIBRARIES}
-# )
-
-#############
-## Install ##
-#############
-
-# all install targets should use catkin DESTINATION variables
-# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
-
-## Mark executable scripts (Python etc.) for installation
-## in contrast to setup.py, you can choose the destination
-# catkin_install_python(PROGRAMS
-#   scripts/my_python_script
-#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark executables for installation
-## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
-# install(TARGETS ${PROJECT_NAME}_node
-#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
-# )
-
-## Mark libraries for installation
-## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
 install(TARGETS ${PROJECT_NAME}
   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
@@ -229,23 +84,3 @@ install(DIRECTORY
   urdf
   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
 )
-
-## Mark other files for installation (e.g. launch and bag files, etc.)
-# install(FILES
-#   # myfile1
-#   # myfile2
-#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
-# )
-
-#############
-## Testing ##
-#############
-
-## Add gtest based cpp test target and link libraries
-# catkin_add_gtest(${PROJECT_NAME}-test test/test_depthai_bridge.cpp)
-# if(TARGET ${PROJECT_NAME}-test)
-#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
-# endif()
-
-## Add folders to be run by python nosetests
-# catkin_add_nosetests(test)
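For context, a minimal sketch of how a downstream catkin node might consume the library configured above, assuming its own package declares depthai_bridge, roscpp and image_transport as dependencies; the node and topic names here are illustrative, not taken from the commit.

#include <image_transport/image_transport.h>
#include <ros/ros.h>

#include <depthai_bridge/ImageConverter.hpp>

int main(int argc, char** argv) {
    ros::init(argc, argv, "bridge_consumer_example");  // hypothetical node name
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);
    image_transport::Publisher pub = it.advertise("color/image", 10);

    // Converter for interleaved (BGR) frames, tagged with an example TF frame name.
    dai::rosBridge::ImageConverter converter("oak_rgb_camera_optical_frame", true);

    // A real node would pull dai::ImgFrame packets from a device output queue and
    // publish converter.toRosMsgPtr(frame) on `pub`; that part is omitted here.
    ros::spin();
    return 0;
}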

‎depthai_bridge/include/depthai_bridge/BridgePublisher.hpp

+219-131
Large diffs are not rendered by default.
depthai_bridge/include/depthai_bridge/DisparityConverter.hpp

@@ -1,32 +1,29 @@
 #pragma once
 
-#include <sstream>
 #include <iostream>
+#include <opencv2/opencv.hpp>
+#include <sstream>
 #include <unordered_map>
 
-#include "stereo_msgs/DisparityImage.h"
-
 #include "depthai/depthai.hpp"
-#include <opencv2/opencv.hpp>
+#include "stereo_msgs/DisparityImage.h"
 
 namespace dai::rosBridge {
 using TimePoint = std::chrono::time_point<std::chrono::steady_clock, std::chrono::steady_clock::duration>;
 
-class DisparityConverter{
-
-  public:
+class DisparityConverter {
+   public:
    // DisparityConverter() = default;
    DisparityConverter(const std::string frameName, float focalLength, float baseline = 7.5, float minDepth = 80, float maxDepth = 1100);
-
+
    void toRosMsg(std::shared_ptr<dai::ImgFrame> inData, stereo_msgs::DisparityImage& outImageMsg);
    stereo_msgs::DisparityImagePtr toRosMsgPtr(std::shared_ptr<dai::ImgFrame> inData);
 
    // void toDaiMsg(const stereo_msgs::DisparityImage& inMsg, dai::ImgFrame& outData);
 
-  private:
-  const std::string _frameName = "";
-  const float _focalLength = 882.2, _baseline = 7.5, _minDepth = 80, _maxDepth;
-
+   private:
+    const std::string _frameName = "";
+    const float _focalLength = 882.2, _baseline = 7.5, _minDepth = 80, _maxDepth;
 };
 
-} // namespace dai::rosBridge
+}  // namespace dai::rosBridge
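A brief usage sketch for this class (values are examples, not from the commit): the constructor takes the TF frame name, the focal length in pixels, and the baseline and depth range in centimetres, which the implementation divides by 100 into metres.

#include <ros/ros.h>
#include <stereo_msgs/DisparityImage.h>

#include <depthai_bridge/DisparityConverter.hpp>

void publishDisparity(ros::Publisher& pub, std::shared_ptr<dai::ImgFrame> frame) {
    // Example numbers: 882.5 px focal length, 7.5 cm baseline, 80 cm to 11 m depth range.
    static dai::rosBridge::DisparityConverter converter("oak_right_camera_optical_frame", 882.5f, 7.5f, 80.0f, 1100.0f);
    stereo_msgs::DisparityImagePtr msg = converter.toRosMsgPtr(frame);
    pub.publish(msg);
}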
depthai_bridge/include/depthai_bridge/ImageConverter.hpp

@@ -1,44 +1,51 @@
 #pragma once
 
-#include <sstream>
 #include <iostream>
+#include <opencv2/opencv.hpp>
+#include <sstream>
 #include <unordered_map>
 
-#include "sensor_msgs/Image.h"
+#include "depthai-shared/common/CameraBoardSocket.hpp"
 #include "depthai/depthai.hpp"
-#include <opencv2/opencv.hpp>
+#include "sensor_msgs/CameraInfo.h"
+#include "sensor_msgs/Image.h"
 
 namespace dai::rosBridge {
 using TimePoint = std::chrono::time_point<std::chrono::steady_clock, std::chrono::steady_clock::duration>;
 
-class ImageConverter{
-
-  public:
+class ImageConverter {
+   public:
    // ImageConverter() = default;
    ImageConverter(const std::string frameName, bool interleaved);
    ImageConverter(bool interleaved);
-
+
    void toRosMsg(std::shared_ptr<dai::ImgFrame> inData, sensor_msgs::Image& outImageMsg);
    sensor_msgs::ImagePtr toRosMsgPtr(std::shared_ptr<dai::ImgFrame> inData);
 
    void toDaiMsg(const sensor_msgs::Image& inMsg, dai::ImgFrame& outData);
 
-  /** TODO(sachin): Add support for ros msg to cv mat since we have some
-   * encodings which cv supports but ros doesn't
-   **/
+    /** TODO(sachin): Add support for ros msg to cv mat since we have some
+     * encodings which cv supports but ros doesn't
+     **/
    cv::Mat rosMsgtoCvMat(sensor_msgs::Image& inMsg);
 
-  private:
-  static std::unordered_map<dai::RawImgFrame::Type, std::string> encodingEnumMap;
-  static std::unordered_map<dai::RawImgFrame::Type, std::string> planarEncodingEnumMap;
+    sensor_msgs::CameraInfo calibrationToCameraInfo(dai::CalibrationHandler calibHandler,
+                                                    dai::CameraBoardSocket cameraId,
+                                                    int width = -1,
+                                                    int height = -1,
+                                                    Point2f topLeftPixelId = Point2f(),
+                                                    Point2f bottomRightPixelId = Point2f());
+
+   private:
+    static std::unordered_map<dai::RawImgFrame::Type, std::string> encodingEnumMap;
+    static std::unordered_map<dai::RawImgFrame::Type, std::string> planarEncodingEnumMap;
 
    // dai::RawImgFrame::Type _srcType;
    bool _daiInterleaved;
    // bool c
    const std::string _frameName = "";
-  void planarToInterleaved(const std::vector<uint8_t>& srcData, std::vector<uint8_t>& destData, int w, int h , int numPlanes, int bpp);
-  void interleavedToPlanar(const std::vector<uint8_t>& srcData, std::vector<uint8_t>& destData, int w, int h , int numPlanes, int bpp);
-
+    void planarToInterleaved(const std::vector<uint8_t>& srcData, std::vector<uint8_t>& destData, int w, int h, int numPlanes, int bpp);
+    void interleavedToPlanar(const std::vector<uint8_t>& srcData, std::vector<uint8_t>& destData, int w, int h, int numPlanes, int bpp);
 };
 
-} // namespace dai::rosBridge
+}  // namespace dai::rosBridge
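A rough sketch of how the new calibrationToCameraInfo() member could be used to fill a sensor_msgs/CameraInfo for the RGB socket; device setup is omitted and the exact depthai-core calls (readCalibration(), socket names) may vary by library version, so treat this as an assumption-laden example.

#include <depthai/depthai.hpp>
#include <sensor_msgs/CameraInfo.h>

#include <depthai_bridge/ImageConverter.hpp>

sensor_msgs::CameraInfo rgbCameraInfo(dai::Device& device) {
    // Read the factory calibration stored on the device.
    dai::CalibrationHandler calibHandler = device.readCalibration();

    // Converter for an interleaved RGB stream; the frame name is an example.
    dai::rosBridge::ImageConverter converter("oak_rgb_camera_optical_frame", true);

    // Intrinsics scaled for a 1280x720 stream; width/height default to -1 (native resolution).
    return converter.calibrationToCameraInfo(calibHandler, dai::CameraBoardSocket::RGB, 1280, 720);
}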
depthai_bridge/include/depthai_bridge/ImgDetectionConverter.hpp

@@ -1,41 +1,38 @@
-#include "depthai/depthai.hpp"
-#include "sensor_msgs/Image.h"
-#include <depthai_bridge/ImageConverter.hpp>
 #include <vision_msgs/Detection2DArray.h>
 
 #include <boost/make_shared.hpp>
 #include <boost/shared_ptr.hpp>
+#include <depthai_bridge/ImageConverter.hpp>
+
+#include "depthai/depthai.hpp"
+#include "sensor_msgs/Image.h"
 
 namespace dai::rosBridge {
 
 class ImgDetectionConverter {
-  public:
-  // DetectionConverter() = default;
-  ImgDetectionConverter(std::string frameName, int width, int height, bool normalized = false);
+   public:
+    // DetectionConverter() = default;
+    ImgDetectionConverter(std::string frameName, int width, int height, bool normalized = false);
 
-  void toRosMsg(std::shared_ptr<dai::ImgDetections> inNetData,
-                vision_msgs::Detection2DArray &opDetectionMsg, TimePoint tStamp, int32_t sequenceNum = -1);
+    void toRosMsg(std::shared_ptr<dai::ImgDetections> inNetData, vision_msgs::Detection2DArray& opDetectionMsg, TimePoint tStamp, int32_t sequenceNum = -1);
 
-  void toRosMsg(std::shared_ptr<dai::ImgDetections> inNetData,
-                vision_msgs::Detection2DArray &opDetectionMsg);
+    void toRosMsg(std::shared_ptr<dai::ImgDetections> inNetData, vision_msgs::Detection2DArray& opDetectionMsg);
 
-  vision_msgs::Detection2DArray::Ptr toRosMsgPtr(std::shared_ptr<dai::ImgDetections> inNetData);
+    vision_msgs::Detection2DArray::Ptr toRosMsgPtr(std::shared_ptr<dai::ImgDetections> inNetData);
 
-  private:
-  uint32_t _sequenceNum;
-  int _width, _height;
-  const std::string _frameName;
-  bool _normalized;
+   private:
+    uint32_t _sequenceNum;
+    int _width, _height;
+    const std::string _frameName;
+    bool _normalized;
 };
 
+/** TODO(sachin): Do we need to have ros msg -> dai bounding box ?
+ * is there any situation where we would need to have xlinkin to take bounding
+ * box as input. One scenario would to take this as input and use ImageManip
+ * node to crop the roi of the image. Since it is not available yet. Leaving
+ * it out for now to speed up on other tasks feel free to raise a issue if you
+ * feel that feature is good to have...
+ */
 
-
-/** TODO(sachin): Do we need to have ros msg -> dai bounding box ?
- * is there any situation where we would need to have xlinkin to take bounding
- * box as input. One scenario would to take this as input and use ImageManip
- * node to crop the roi of the image. Since it is not available yet. Leaving
- * it out for now to speed up on other tasks feel free to raise a issue if you
- * feel that feature is good to have...
- */
-
-} // namespace dai::rosBridge
+}  // namespace dai::rosBridge
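For orientation, a minimal sketch of converting a dai::ImgDetections packet into a vision_msgs/Detection2DArray with this class; the frame name and the 300x300 network input size are example values. With normalized = false the converter multiplies the network's 0..1 box coordinates by the given width and height.

#include <ros/ros.h>
#include <vision_msgs/Detection2DArray.h>

#include <depthai_bridge/ImgDetectionConverter.hpp>

void publishDetections(ros::Publisher& pub, std::shared_ptr<dai::ImgDetections> detections) {
    static dai::rosBridge::ImgDetectionConverter converter("oak_rgb_camera_optical_frame", 300, 300, false);
    pub.publish(converter.toRosMsgPtr(detections));
}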
depthai_bridge/include/depthai_bridge/SpatialDetectionConverter.hpp

@@ -1,40 +1,42 @@
-#include "depthai/depthai.hpp"
-#include "sensor_msgs/Image.h"
-#include <depthai_bridge/ImageConverter.hpp>
 #include <depthai_ros_msgs/SpatialDetectionArray.h>
 #include <vision_msgs/Detection2DArray.h>
 
 #include <boost/make_shared.hpp>
 #include <boost/shared_ptr.hpp>
+#include <depthai_bridge/ImageConverter.hpp>
+
+#include "depthai/depthai.hpp"
+#include "sensor_msgs/Image.h"
 
 namespace dai::rosBridge {
 
 class SpatialDetectionConverter {
-  public:
-  // DetectionConverter() = default;
-  SpatialDetectionConverter(std::string frameName, int width, int height, bool normalized = false);
+   public:
+    // DetectionConverter() = default;
+    SpatialDetectionConverter(std::string frameName, int width, int height, bool normalized = false);
 
-  void toRosMsg(std::shared_ptr<dai::SpatialImgDetections> inNetData,
-                depthai_ros_msgs::SpatialDetectionArray &opDetectionMsg, TimePoint tStamp, int32_t sequenceNum = -1);
+    void toRosMsg(std::shared_ptr<dai::SpatialImgDetections> inNetData,
+                  depthai_ros_msgs::SpatialDetectionArray& opDetectionMsg,
+                  TimePoint tStamp,
+                  int32_t sequenceNum = -1);
 
-  void toRosMsg(std::shared_ptr<dai::SpatialImgDetections> inNetData,
-                depthai_ros_msgs::SpatialDetectionArray &opDetectionMsg);
+    void toRosMsg(std::shared_ptr<dai::SpatialImgDetections> inNetData, depthai_ros_msgs::SpatialDetectionArray& opDetectionMsg);
 
-  depthai_ros_msgs::SpatialDetectionArray::Ptr toRosMsgPtr(std::shared_ptr<dai::SpatialImgDetections> inNetData);
+    depthai_ros_msgs::SpatialDetectionArray::Ptr toRosMsgPtr(std::shared_ptr<dai::SpatialImgDetections> inNetData);
 
-  private:
-  uint32_t _sequenceNum;
-  int _width, _height;
-  const std::string _frameName;
-  bool _normalized;
+   private:
+    uint32_t _sequenceNum;
+    int _width, _height;
+    const std::string _frameName;
+    bool _normalized;
 };
 
-/** TODO(sachin): Do we need to have ros msg -> dai bounding box ?
- * is there any situation where we would need to have xlinkin to take bounding
- * box as input. One scenario would to take this as input and use ImageManip
- * node to crop the roi of the image. Since it is not available yet. Leaving
- * it out for now to speed up on other tasks feel free to raise a issue if you
- * feel that feature is good to have...
- */
+/** TODO(sachin): Do we need to have ros msg -> dai bounding box ?
+ * is there any situation where we would need to have xlinkin to take bounding
+ * box as input. One scenario would to take this as input and use ImageManip
+ * node to crop the roi of the image. Since it is not available yet. Leaving
+ * it out for now to speed up on other tasks feel free to raise a issue if you
+ * feel that feature is good to have...
+ */
 
-} // namespace dai::rosBridge
+}  // namespace dai::rosBridge
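The spatial variant follows the same pattern but produces depthai_ros_msgs/SpatialDetectionArray, whose detections also carry a 3D position; a short sketch (frame name and 300x300 size are placeholders, and the position is in metres as shown in the implementation further below):

#include <ros/ros.h>

#include <depthai_bridge/SpatialDetectionConverter.hpp>
#include <depthai_ros_msgs/SpatialDetectionArray.h>

void logSpatialDetections(std::shared_ptr<dai::SpatialImgDetections> inNetData) {
    static dai::rosBridge::SpatialDetectionConverter converter("oak_rgb_camera_optical_frame", 300, 300, false);
    depthai_ros_msgs::SpatialDetectionArray msg;
    converter.toRosMsg(inNetData, msg);
    for(const auto& det : msg.detections) {
        ROS_INFO("detection at (%.2f, %.2f, %.2f) m", det.position.x, det.position.y, det.position.z);
    }
}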

‎depthai_bridge/launch/urdf.launch

+1-1

@@ -28,5 +28,5 @@
 
 
   <node name="$(arg camera_name)_state_publisher" pkg="robot_state_publisher" type="robot_state_publisher" output="screen" required="true">
-    <remap from="robot_description" to="$(arg camera_name)_description" /></node>
+    <remap from="robot_description" to="$(arg camera_name)_description" /></node>
 </launch>

‎depthai_bridge/package.xml

+15-18

@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <package format="3">
   <name>depthai_bridge</name>
-  <version>0.0.0</version>
+  <version>1.0.0</version>
   <description>The depthai_bridge package</description>
 
   <!-- One maintainer tag required, multiple allowed, one person per tag -->
@@ -54,26 +54,23 @@
   <buildtool_depend condition="$ROS_VERSION == 1">catkin</buildtool_depend>
   <buildtool_depend condition="$ROS_VERSION == 2">ament_cmake</buildtool_depend>
 
-  <build_depend>depthai_ros_msgs</build_depend>
-  <build_depend>libboost-dev</build_depend>
-  <build_depend>roscpp</build_depend>
-  <build_depend>sensor_msgs</build_depend>
-  <build_depend>std_msgs</build_depend>
+  <!-- <build_export_depend>libboost-dev</build_export_depend> -->
+  <depend>libboost-dev</depend>
 
-  <build_export_depend>depthai_ros_msgs</build_export_depend>
-  <build_export_depend>libboost-dev</build_export_depend>
-  <build_export_depend>roscpp</build_export_depend>
-  <build_export_depend>sensor_msgs</build_export_depend>
-  <build_export_depend>std_msgs</build_export_depend>
-  <depend>libopencv-dev</depend>
-  <depend>vision_msgs</depend>
-  <exec_depend>depthai_ros_msgs</exec_depend>
-  <exec_depend>roscpp</exec_depend>
-  <exec_depend>sensor_msgs</exec_depend>
-  <exec_depend>std_msgs</exec_depend>
+
+  <depend>cv_bridge</depend>
   <depend>camera_info_manager</depend>
+  <depend>depthai_ros_msgs</depend>
+  <depend>image_transport</depend>
+  <depend>libopencv-dev</depend>
+  <depend>roscpp</depend>
+  <depend>sensor_msgs</depend>
+  <depend>std_msgs</depend>
   <depend>stereo_msgs</depend>
-
+  <depend>vision_msgs</depend>4
+  <exec_depend>robot_state_publisher</exec_depend>
+  <exec_depend>xacro</exec_depend>
+
   <!-- The export tag contains other, unspecified, tags -->
   <export>
     <!-- <build_type condition="$ROS_VERSION == 1">catkin</build_type>
depthai_bridge/src/DisparityConverter.cpp

+65-76

@@ -1,6 +1,8 @@
+#include <ros/ros.h>
+
 #include <boost/make_shared.hpp>
 #include <depthai_bridge/DisparityConverter.hpp>
-#include <ros/ros.h>
+
 #include "sensor_msgs/image_encodings.h"
 
 // FIXME(Sachin): Do I need to convert the encodings that are available in dai
@@ -14,7 +16,7 @@
 // it planar
 namespace dai::rosBridge {
 
-/*
+/*
 std::unordered_map<dai::RawImgFrame::Type, std::string> DisparityConverter::encodingEnumMap = {
     {dai::RawImgFrame::Type::YUV422i , "yuv422" },
     {dai::RawImgFrame::Type::RGBA8888 , "rgba8" },
@@ -28,87 +30,75 @@ std::unordered_map<dai::RawImgFrame::Type, std::string> DisparityConverter::encodingEnumMap = {
 
 std::unordered_map<dai::RawImgFrame::Type, std::string> DisparityConverter::planarEncodingEnumMap = {
     {dai::RawImgFrame::Type::BGR888p, "3_1_bgr8"}, // 3_1_bgr8 represents 3 planes/channels and 1 byte per pixel in BGR format
-    {dai::RawImgFrame::Type::NV12 , "nv12" }
+    {dai::RawImgFrame::Type::NV12 , "nv12" }
 
-};
+};
 */
 
-DisparityConverter::DisparityConverter(const std::string frameName, float focalLength, float baseline, float minDepth, float maxDepth)
-    : _frameName(frameName), _focalLength(focalLength), _baseline(baseline / 100.0), _minDepth(minDepth / 100.0), _maxDepth(maxDepth / 100.0){}
+DisparityConverter::DisparityConverter(const std::string frameName, float focalLength, float baseline, float minDepth, float maxDepth)
+    : _frameName(frameName), _focalLength(focalLength), _baseline(baseline / 100.0), _minDepth(minDepth / 100.0), _maxDepth(maxDepth / 100.0) {}
 
-void DisparityConverter::toRosMsg(std::shared_ptr<dai::ImgFrame> inData,
-                                  stereo_msgs::DisparityImage &outDispImageMsg) {
+void DisparityConverter::toRosMsg(std::shared_ptr<dai::ImgFrame> inData, stereo_msgs::DisparityImage& outDispImageMsg) {
+    auto tstamp = inData->getTimestamp();
+    int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(tstamp.time_since_epoch()).count();
+    int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(tstamp.time_since_epoch()).count() % 1000000000UL;
 
+    outDispImageMsg.header.seq = inData->getSequenceNum();
+    outDispImageMsg.header.stamp = ros::Time(sec, nsec);
+    outDispImageMsg.header.frame_id = _frameName;
+    outDispImageMsg.f = _focalLength;
+    outDispImageMsg.T = _baseline;  // TODO(Sachin): Change this units
+    outDispImageMsg.min_disparity = _focalLength * _baseline / _maxDepth;
+    outDispImageMsg.max_disparity = _focalLength * _baseline / _minDepth;
 
-    auto tstamp = inData->getTimestamp();
-    int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(
-                      tstamp.time_since_epoch())
-                      .count();
-    int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(
-                       tstamp.time_since_epoch())
-                       .count() %
-                   1000000000UL;
-
-    outDispImageMsg.header.seq = inData->getSequenceNum();
-    outDispImageMsg.header.stamp = ros::Time(sec, nsec);
-    outDispImageMsg.header.frame_id = _frameName;
-    outDispImageMsg.f = _focalLength;
-    outDispImageMsg.T = _baseline; // TODO(Sachin): Change this units
-    outDispImageMsg.min_disparity = _focalLength * _baseline / _maxDepth;
-    outDispImageMsg.max_disparity = _focalLength * _baseline / _minDepth;
-
-    sensor_msgs::Image& outImageMsg = outDispImageMsg.image;
+    sensor_msgs::Image& outImageMsg = outDispImageMsg.image;
 
     // copying the data to ros msg
     // outDispImageMsg.header = imgHeader;
     // std::string temp_str(encodingEnumMap[inData->getType()]);
     outImageMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
    outImageMsg.header = outDispImageMsg.header;
-    if(inData->getType() == dai::RawImgFrame::Type::RAW8){
-        outDispImageMsg.delta_d = 1;
-        size_t size = inData->getData().size() * sizeof(float);
-        outImageMsg.data.resize(size);
-        outImageMsg.height = inData->getHeight();
-        outImageMsg.width = inData->getWidth();
-        outImageMsg.step = size / inData->getHeight();
-        outImageMsg.is_bigendian = true;
-
-        std::vector<float> convertedData(inData->getData().begin(),inData->getData().end());
-        unsigned char *imageMsgDataPtr =
-            reinterpret_cast<unsigned char *>(outImageMsg.data.data());
-
-        unsigned char *daiImgData =
-            reinterpret_cast<unsigned char *>(convertedData.data());
-
-        // TODO(Sachin): Try using assign since it is a vector
-        // img->data.assign(packet.data->cbegin(), packet.data->cend());
-        memcpy(imageMsgDataPtr, daiImgData, size);
-
-    }else{
-        outDispImageMsg.delta_d = 1 / 32;
-        size_t size = inData->getHeight() * inData->getWidth() * sizeof(float);
-        outImageMsg.data.resize(size);
-        outImageMsg.height = inData->getHeight();
-        outImageMsg.width = inData->getWidth();
-        outImageMsg.step = size / inData->getHeight();
-        outImageMsg.is_bigendian = true;
-        unsigned char *daiImgData =
-            reinterpret_cast<unsigned char *>(inData->getData().data());
-
-        std::vector<int16_t> raw16Data(inData->getHeight() * inData->getWidth());
-        unsigned char *raw16DataPtr = reinterpret_cast<unsigned char *>(raw16Data.data());
-        memcpy(raw16DataPtr, daiImgData, inData->getData().size());
-        std::vector<float> convertedData;
-        std::transform(raw16Data.begin(), raw16Data.end(), std::back_inserter(convertedData),
-                       [](int16_t disp) -> std::size_t { return static_cast<float>(disp) / 32.0; });
-
-        unsigned char *imageMsgDataPtr = reinterpret_cast<unsigned char *>(outImageMsg.data.data());
-        unsigned char *convertedDataPtr = reinterpret_cast<unsigned char *>(convertedData.data());
-        memcpy(imageMsgDataPtr, convertedDataPtr, size);
-
+    if(inData->getType() == dai::RawImgFrame::Type::RAW8) {
+        outDispImageMsg.delta_d = 1;
+        size_t size = inData->getData().size() * sizeof(float);
+        outImageMsg.data.resize(size);
+        outImageMsg.height = inData->getHeight();
+        outImageMsg.width = inData->getWidth();
+        outImageMsg.step = size / inData->getHeight();
+        outImageMsg.is_bigendian = true;
+
+        std::vector<float> convertedData(inData->getData().begin(), inData->getData().end());
+        unsigned char* imageMsgDataPtr = reinterpret_cast<unsigned char*>(outImageMsg.data.data());
+
+        unsigned char* daiImgData = reinterpret_cast<unsigned char*>(convertedData.data());
+
+        // TODO(Sachin): Try using assign since it is a vector
+        // img->data.assign(packet.data->cbegin(), packet.data->cend());
+        memcpy(imageMsgDataPtr, daiImgData, size);
+
+    } else {
+        outDispImageMsg.delta_d = 1 / 32;
+        size_t size = inData->getHeight() * inData->getWidth() * sizeof(float);
+        outImageMsg.data.resize(size);
+        outImageMsg.height = inData->getHeight();
+        outImageMsg.width = inData->getWidth();
+        outImageMsg.step = size / inData->getHeight();
+        outImageMsg.is_bigendian = true;
+        unsigned char* daiImgData = reinterpret_cast<unsigned char*>(inData->getData().data());
+
+        std::vector<int16_t> raw16Data(inData->getHeight() * inData->getWidth());
+        unsigned char* raw16DataPtr = reinterpret_cast<unsigned char*>(raw16Data.data());
+        memcpy(raw16DataPtr, daiImgData, inData->getData().size());
+        std::vector<float> convertedData;
+        std::transform(
+            raw16Data.begin(), raw16Data.end(), std::back_inserter(convertedData), [](int16_t disp) -> std::size_t { return static_cast<float>(disp) / 32.0; });
+
+        unsigned char* imageMsgDataPtr = reinterpret_cast<unsigned char*>(outImageMsg.data.data());
+        unsigned char* convertedDataPtr = reinterpret_cast<unsigned char*>(convertedData.data());
+        memcpy(imageMsgDataPtr, convertedDataPtr, size);
    }
-
-    return;
+
+    return;
 }
 
 /* void DisparityConverter::toDaiMsg(const stereo_msgs::DisparityImage &inMsg,
@@ -158,11 +148,10 @@ void DisparityConverter::toRosMsg(std::shared_ptr<dai::ImgFrame> inData,
     outData.setType(revEncodingIter->first);
 } */
 
-stereo_msgs::DisparityImagePtr
-DisparityConverter::toRosMsgPtr(std::shared_ptr<dai::ImgFrame> inData) {
-    stereo_msgs::DisparityImagePtr ptr = boost::make_shared<stereo_msgs::DisparityImage>();
-    toRosMsg(inData, *ptr);
-    return ptr;
+stereo_msgs::DisparityImagePtr DisparityConverter::toRosMsgPtr(std::shared_ptr<dai::ImgFrame> inData) {
+    stereo_msgs::DisparityImagePtr ptr = boost::make_shared<stereo_msgs::DisparityImage>();
+    toRosMsg(inData, *ptr);
+    return ptr;
 }
 
-} // namespace dai::rosBridge
+}  // namespace dai::rosBridge

‎depthai_bridge/src/ImageConverter.cpp

+223-213
Large diffs are not rendered by default.
depthai_bridge/src/ImgDetectionConverter.cpp

+43-53

@@ -1,37 +1,28 @@
+#include <ros/ros.h>
+
 #include <boost/make_shared.hpp>
 #include <depthai_bridge/ImgDetectionConverter.hpp>
-#include <ros/ros.h>
 
-namespace dai::rosBridge{
+namespace dai::rosBridge {
 
-ImgDetectionConverter::ImgDetectionConverter(std::string frameName, int width,
-                                             int height, bool normalized)
-    : _frameName(frameName), _width(width), _height(height),
-      _normalized(normalized), _sequenceNum(0) {}
+ImgDetectionConverter::ImgDetectionConverter(std::string frameName, int width, int height, bool normalized)
+    : _frameName(frameName), _width(width), _height(height), _normalized(normalized), _sequenceNum(0) {}
 
 void ImgDetectionConverter::toRosMsg(std::shared_ptr<dai::ImgDetections> inNetData,
-                                     vision_msgs::Detection2DArray &opDetectionMsg, TimePoint tStamp,
-                                     int32_t sequenceNum) {
-
+                                     vision_msgs::Detection2DArray& opDetectionMsg,
+                                     TimePoint tStamp,
+                                     int32_t sequenceNum) {
     toRosMsg(inNetData, opDetectionMsg);
-    if (sequenceNum != -1)
-        _sequenceNum = sequenceNum;
+    if(sequenceNum != -1) _sequenceNum = sequenceNum;
 
-    int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(
-                      tStamp.time_since_epoch())
-                      .count();
-    int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(
-                       tStamp.time_since_epoch())
-                       .count() %
-                   1000000000UL;
+    int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(tStamp.time_since_epoch()).count();
+    int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(tStamp.time_since_epoch()).count() % 1000000000UL;
     opDetectionMsg.header.seq = _sequenceNum;
     opDetectionMsg.header.stamp = ros::Time(sec, nsec);
     opDetectionMsg.header.frame_id = _frameName;
-}
-
-void ImgDetectionConverter::toRosMsg(std::shared_ptr<dai::ImgDetections> inNetData,
-                                     vision_msgs::Detection2DArray &opDetectionMsg) {
+}
 
+void ImgDetectionConverter::toRosMsg(std::shared_ptr<dai::ImgDetections> inNetData, vision_msgs::Detection2DArray& opDetectionMsg) {
     // if (inNetData->detections.size() == 0)
     //     throw std::runtime_error(
     //         "Make sure to send the detections with size greater than 0");
@@ -45,42 +36,41 @@ void ImgDetectionConverter::toRosMsg(std::shared_ptr<dai::ImgDetections> inNetDa
 
     // TODO(Sachin): check if this works fine for normalized detection
     // publishing
-    for (int i = 0; i < inNetData->detections.size(); ++i) {
-        int xMin, yMin, xMax, yMax;
-        if (_normalized) {
-            xMin = inNetData->detections[i].xmin;
-            yMin = inNetData->detections[i].ymin;
-            xMax = inNetData->detections[i].xmax;
-            yMax = inNetData->detections[i].ymax;
-        } else {
-            xMin = inNetData->detections[i].xmin * _width;
-            yMin = inNetData->detections[i].ymin * _height;
-            xMax = inNetData->detections[i].xmax * _width;
-            yMax = inNetData->detections[i].ymax * _height;
-        }
-
-        float xSize = xMax - xMin;
-        float ySize = yMax - yMin;
-        float xCenter = xMin + xSize / 2;
-        float yCenter = yMin + ySize / 2;
-
-        opDetectionMsg.detections[i].results.resize(1);
-        opDetectionMsg.detections[i].results[0].id = inNetData->detections[i].label;
-        opDetectionMsg.detections[i].results[0].score = inNetData->detections[i].confidence;
-
-        opDetectionMsg.detections[i].bbox.center.x = xCenter;
-        opDetectionMsg.detections[i].bbox.center.y = yCenter;
-        opDetectionMsg.detections[i].bbox.size_x = xSize;
-        opDetectionMsg.detections[i].bbox.size_y = ySize;
-
+    for(int i = 0; i < inNetData->detections.size(); ++i) {
+        int xMin, yMin, xMax, yMax;
+        if(_normalized) {
+            xMin = inNetData->detections[i].xmin;
+            yMin = inNetData->detections[i].ymin;
+            xMax = inNetData->detections[i].xmax;
+            yMax = inNetData->detections[i].ymax;
+        } else {
+            xMin = inNetData->detections[i].xmin * _width;
+            yMin = inNetData->detections[i].ymin * _height;
+            xMax = inNetData->detections[i].xmax * _width;
+            yMax = inNetData->detections[i].ymax * _height;
+        }
+
+        float xSize = xMax - xMin;
+        float ySize = yMax - yMin;
+        float xCenter = xMin + xSize / 2;
+        float yCenter = yMin + ySize / 2;
+
+        opDetectionMsg.detections[i].results.resize(1);
+        opDetectionMsg.detections[i].results[0].id = inNetData->detections[i].label;
+        opDetectionMsg.detections[i].results[0].score = inNetData->detections[i].confidence;
+
+        opDetectionMsg.detections[i].bbox.center.x = xCenter;
+        opDetectionMsg.detections[i].bbox.center.y = yCenter;
+        opDetectionMsg.detections[i].bbox.size_x = xSize;
+        opDetectionMsg.detections[i].bbox.size_y = ySize;
     }
     _sequenceNum++;
-}
+}
 
-vision_msgs::Detection2DArray::Ptr ImgDetectionConverter::toRosMsgPtr(std::shared_ptr<dai::ImgDetections> inNetData){
+vision_msgs::Detection2DArray::Ptr ImgDetectionConverter::toRosMsgPtr(std::shared_ptr<dai::ImgDetections> inNetData) {
     vision_msgs::Detection2DArray::Ptr ptr = boost::make_shared<vision_msgs::Detection2DArray>();
     toRosMsg(inNetData, *ptr);
     return ptr;
-}
+}
 
 } // namespace dai::rosBridge
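As a quick worked example of the scaling branch above (normalized = false): with a 300x300 network input and a detection spanning xmin = 0.25 to xmax = 0.75, the box denormalizes to xMin = 75 and xMax = 225, giving size_x = 150 and center.x = 150. The same arithmetic as a tiny self-checking snippet (illustrative only):

#include <cassert>

void denormalizeExample() {
    const int width = 300;
    const float xMin = 0.25f * width;            // 75
    const float xMax = 0.75f * width;            // 225
    assert(xMax - xMin == 150.0f);               // bbox.size_x
    assert(xMin + (xMax - xMin) / 2 == 150.0f);  // bbox.center.x
}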
depthai_bridge/src/SpatialDetectionConverter.cpp

@@ -1,37 +1,28 @@
+#include <ros/ros.h>
+
 #include <boost/make_shared.hpp>
 #include <depthai_bridge/SpatialDetectionConverter.hpp>
-#include <ros/ros.h>
 
-namespace dai::rosBridge{
+namespace dai::rosBridge {
 
-SpatialDetectionConverter::SpatialDetectionConverter(std::string frameName, int width,
-                                                     int height, bool normalized)
-    : _frameName(frameName), _width(width), _height(height),
-      _normalized(normalized), _sequenceNum(0) {}
+SpatialDetectionConverter::SpatialDetectionConverter(std::string frameName, int width, int height, bool normalized)
+    : _frameName(frameName), _width(width), _height(height), _normalized(normalized), _sequenceNum(0) {}
 
 void SpatialDetectionConverter::toRosMsg(std::shared_ptr<dai::SpatialImgDetections> inNetData,
-                                         depthai_ros_msgs::SpatialDetectionArray &opDetectionMsg, TimePoint tStamp,
-                                         int32_t sequenceNum) {
-
+                                         depthai_ros_msgs::SpatialDetectionArray& opDetectionMsg,
+                                         TimePoint tStamp,
+                                         int32_t sequenceNum) {
     toRosMsg(inNetData, opDetectionMsg);
-    if (sequenceNum != -1)
-        _sequenceNum = sequenceNum;
+    if(sequenceNum != -1) _sequenceNum = sequenceNum;
 
-    int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(
-                      tStamp.time_since_epoch())
-                      .count();
-    int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(
-                       tStamp.time_since_epoch())
-                       .count() %
-                   1000000000UL;
+    int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(tStamp.time_since_epoch()).count();
+    int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(tStamp.time_since_epoch()).count() % 1000000000UL;
     opDetectionMsg.header.seq = _sequenceNum;
     opDetectionMsg.header.stamp = ros::Time(sec, nsec);
     opDetectionMsg.header.frame_id = _frameName;
-}
-
-void SpatialDetectionConverter::toRosMsg(std::shared_ptr<dai::SpatialImgDetections> inNetData,
-                                         depthai_ros_msgs::SpatialDetectionArray &opDetectionMsg) {
+}
 
+void SpatialDetectionConverter::toRosMsg(std::shared_ptr<dai::SpatialImgDetections> inNetData, depthai_ros_msgs::SpatialDetectionArray& opDetectionMsg) {
     // if (inNetData->detections.size() == 0)
     //     throw std::runtime_error(
    //         "Make sure to send the detections with size greater than 0");
@@ -45,48 +36,47 @@ void SpatialDetectionConverter::toRosMsg(std::shared_ptr<dai::SpatialImgDetectio
 
     // TODO(Sachin): check if this works fine for normalized detection
     // publishing
-    for (int i = 0; i < inNetData->detections.size(); ++i) {
-        int xMin, yMin, xMax, yMax;
-        if (_normalized) {
-            xMin = inNetData->detections[i].xmin;
-            yMin = inNetData->detections[i].ymin;
-            xMax = inNetData->detections[i].xmax;
-            yMax = inNetData->detections[i].ymax;
-        } else {
-            xMin = inNetData->detections[i].xmin * _width;
-            yMin = inNetData->detections[i].ymin * _height;
-            xMax = inNetData->detections[i].xmax * _width;
-            yMax = inNetData->detections[i].ymax * _height;
-        }
-
-        float xSize = xMax - xMin;
-        float ySize = yMax - yMin;
-        float xCenter = xMin + xSize / 2;
-        float yCenter = yMin + ySize / 2;
-
-        opDetectionMsg.detections[i].results.resize(1);
-        opDetectionMsg.detections[i].results[0].id = inNetData->detections[i].label;
-        opDetectionMsg.detections[i].results[0].score = inNetData->detections[i].confidence;
-
-        opDetectionMsg.detections[i].bbox.center.x = xCenter;
-        opDetectionMsg.detections[i].bbox.center.y = yCenter;
-        opDetectionMsg.detections[i].bbox.size_x = xSize;
-        opDetectionMsg.detections[i].bbox.size_y = ySize;
-        // opDetectionMsg.detections[i].is_tracking = _isTracking;
-
-        // converting mm to meters since per ros rep-103 lenght should always be in meters
-        opDetectionMsg.detections[i].position.x = inNetData->detections[i].spatialCoordinates.x/1000;
-        opDetectionMsg.detections[i].position.y = inNetData->detections[i].spatialCoordinates.y/1000;
-        opDetectionMsg.detections[i].position.z = inNetData->detections[i].spatialCoordinates.z/1000;
-
+    for(int i = 0; i < inNetData->detections.size(); ++i) {
+        int xMin, yMin, xMax, yMax;
+        if(_normalized) {
+            xMin = inNetData->detections[i].xmin;
+            yMin = inNetData->detections[i].ymin;
+            xMax = inNetData->detections[i].xmax;
+            yMax = inNetData->detections[i].ymax;
+        } else {
+            xMin = inNetData->detections[i].xmin * _width;
+            yMin = inNetData->detections[i].ymin * _height;
+            xMax = inNetData->detections[i].xmax * _width;
+            yMax = inNetData->detections[i].ymax * _height;
+        }
+
+        float xSize = xMax - xMin;
+        float ySize = yMax - yMin;
+        float xCenter = xMin + xSize / 2;
+        float yCenter = yMin + ySize / 2;
+
+        opDetectionMsg.detections[i].results.resize(1);
+        opDetectionMsg.detections[i].results[0].id = inNetData->detections[i].label;
+        opDetectionMsg.detections[i].results[0].score = inNetData->detections[i].confidence;
+
+        opDetectionMsg.detections[i].bbox.center.x = xCenter;
+        opDetectionMsg.detections[i].bbox.center.y = yCenter;
+        opDetectionMsg.detections[i].bbox.size_x = xSize;
+        opDetectionMsg.detections[i].bbox.size_y = ySize;
+        // opDetectionMsg.detections[i].is_tracking = _isTracking;
+
+        // converting mm to meters since per ros rep-103 lenght should always be in meters
+        opDetectionMsg.detections[i].position.x = inNetData->detections[i].spatialCoordinates.x / 1000;
+        opDetectionMsg.detections[i].position.y = inNetData->detections[i].spatialCoordinates.y / 1000;
+        opDetectionMsg.detections[i].position.z = inNetData->detections[i].spatialCoordinates.z / 1000;
     }
     _sequenceNum++;
-}
+}
 
-depthai_ros_msgs::SpatialDetectionArray::Ptr SpatialDetectionConverter::toRosMsgPtr(std::shared_ptr<dai::SpatialImgDetections> inNetData){
+depthai_ros_msgs::SpatialDetectionArray::Ptr SpatialDetectionConverter::toRosMsgPtr(std::shared_ptr<dai::SpatialImgDetections> inNetData) {
     depthai_ros_msgs::SpatialDetectionArray::Ptr ptr = boost::make_shared<depthai_ros_msgs::SpatialDetectionArray>();
     toRosMsg(inNetData, *ptr);
     return ptr;
-}
+}
 
 } // namespace dai::rosBridge

‎depthai_ros_msgs/CMakeLists.txt

+1

@@ -25,6 +25,7 @@ add_message_files (
 add_service_files (
   FILES
   TriggerNamed.srv
+  NormalizedImageCrop.srv
 )
 
 ## Generate added messages and services with any dependencies listed here
depthai_ros_msgs/srv/NormalizedImageCrop.srv

@@ -0,0 +1,3 @@
+geometry_msgs/Pose2D topLeft
+geometry_msgs/Pose2D bottomRight
+---
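To make the new service definition concrete, a hedged client-side sketch follows: the request carries normalized top-left and bottom-right corners as geometry_msgs/Pose2D and the response is empty. The service name "crop_control_srv" is a placeholder, since the topic under which the bridge advertises it is not shown in this diff.

#include <ros/ros.h>

#include <depthai_ros_msgs/NormalizedImageCrop.h>

int main(int argc, char** argv) {
    ros::init(argc, argv, "crop_client_example");
    ros::NodeHandle nh;
    ros::ServiceClient client = nh.serviceClient<depthai_ros_msgs::NormalizedImageCrop>("crop_control_srv");

    depthai_ros_msgs::NormalizedImageCrop srv;
    srv.request.topLeft.x = 0.2;  // normalized image coordinates in [0, 1]
    srv.request.topLeft.y = 0.2;
    srv.request.bottomRight.x = 0.8;
    srv.request.bottomRight.y = 0.8;

    if(!client.call(srv)) ROS_WARN("NormalizedImageCrop call failed");
    return 0;
}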
