From 05c8b7df9bbb82e97d066e7532e7a05813d67e6b Mon Sep 17 00:00:00 2001
From: robotrancher <93450393+robotrancher@users.noreply.github.com>
Date: Wed, 12 Jan 2022 11:04:30 -0800
Subject: [PATCH] Fix runtime on noetic and linux (#24)

* Fix runtime on noetic and linux

* Fixup Doc

Co-authored-by: Lou Amadio
---
 README.md                         | 43 ++++++++++++-------------------
 ros_msft_onnx/CMakeLists.txt      | 24 ++++++++++++-----
 ros_msft_onnx/src/pose_parser.cpp | 20 +++++++-------
 ros_msft_onnx/src/yolo_box.cpp    |  8 +++---
 4 files changed, 49 insertions(+), 46 deletions(-)

diff --git a/README.md b/README.md
index 9bd37aa..24c3bfd 100644
--- a/README.md
+++ b/README.md
@@ -6,56 +6,47 @@ ONNX Runtime Execution Providers (EPs) enables the execution of any ONNX model u
 In simple terms, developers no longer need to worry about the nuances of hardware specific custom libraries to accelerate their machine learning models. This repository demonstrates that by enabling the same code with ROS 2 to run on different hardware platforms using their respective AI acceleration libraries for optimized execution of the ONNX model.
 
-## System Requirement
+## Requirements
 
 * Microsoft Windows 10 64-bit or Ubuntu 20.04 LTS x86_64
 * ROS Noetic
-  * To make use of the hardware acceleration, the system is required to be compatible with [**CUDA 10.1**](https://developer.nvidia.com/cuda-toolkit) and [**cuDNN 7.6.5**](https://developer.nvidia.com/cudnn).
-
-> For GPU support, please follow the installation steps on NVIDIA portal before proceeding.
+  * GPU acceleration on Linux
+    * [**CUDA 11**](https://developer.nvidia.com/cuda-toolkit)
+    * [**cuDNN 8.x**](https://developer.nvidia.com/cudnn)
 
 ## How to Build
 The Onnx ROS Node is distrubted as source. To consume it in your robot, clone the ros_msft_onnx sources into your workspace.
 
-* Windows
-```Batchfile
+### Windows
+Windows will automatically use dedicated ML hardware, GPU or CPU depending on the best combination for the running ML model.
+
+```cmd
 mkdir c:\workspace\onnx_demo\src
 cd c:\workspace\onnx_demo\src
 
 git clone https://github.com/ms-iot/ros_msft_onnx -b noetic-devel
 
 ::For running the samples, clone `ros_msft_camera` as well
 git clone https://github.com/ms-iot/ros_msft_camera --recursive
+catkin_make
 ```
 
-* Ubuntu
-```sh
+### Ubuntu
+
+```bash
 mkdir -p onnx_demo/src
 cd onnx_demo/src
 
 git clone https://github.com/ms-iot/ros_msft_onnx -b noetic-devel
 
-#For running the samples, clone `ros_msft_camera` as well
+# For running the samples, clone `cv_camera` as well
 git clone https://github.com/OTL/cv_camera --recursive
-```
-
-## Building
-ONNX Runtime team is releasing different binaries for CPU and GPU (CUDA) support. To switch between the two, a workspace rebuild is required.
-
+cd ..
 
-Make sure to source your ROS version before building.
-
-* CPU Processing
-
-```Batchfile
-cd onnx_demo
-
-catkin_make -DCUDA_SUPPORT=OFF
+catkin_make
 ```
 
-* GPU Processing (CUDA)
-
-```Batchfile
-cd onnx_demo
+If you would like to use CUDA support and have the software installed, you can build using the following command:
 
+``` bash
 catkin_make -DCUDA_SUPPORT=ON
 ```
diff --git a/ros_msft_onnx/CMakeLists.txt b/ros_msft_onnx/CMakeLists.txt
index 024b7b3..fedb638 100644
--- a/ros_msft_onnx/CMakeLists.txt
+++ b/ros_msft_onnx/CMakeLists.txt
@@ -47,7 +47,15 @@ generate_dynamic_reconfigure_options(
 
 catkin_package(
   INCLUDE_DIRS include
-  CATKIN_DEPENDS std_msgs geometry_msgs visualization_msgs ros_msft_onnx_msgs image_transport dynamic_reconfigure roscpp cv_bridge tf
+  CATKIN_DEPENDS
+    std_msgs
+    geometry_msgs
+    visualization_msgs
+    ros_msft_onnx_msgs
+    image_transport
+    dynamic_reconfigure
+    roscpp
+    cv_bridge tf
   CFG_EXTRAS "onnxruntime_vendor-extras.cmake"
 )
 
@@ -63,14 +71,18 @@ target_link_libraries(${PROJECT_NAME}_node ${catkin_LIBRARIES} ${OpenCV_LIBRARIE
 
 message("Installing onnxruntime_vendor Nuget package")
 
-if(CUDA_SUPPORT)
+if(MSVC)
+  set(ONNX_RUNTIME "Microsoft.ML.OnnxRuntime.DirectML.1.7.0")
+  set(PACKAGE_URL "https://www.nuget.org/api/v2/package/Microsoft.ML.OnnxRuntime.DirectML/1.7.0")
+  set(PACKAGE_SHA512 "2e5bd2c0ade72444d4efdfbd6a75571aaa72045769f9b5847186129c9e5e667ad080d5d2b9a12cce88c9eee68302be89cdb7030ccefa3d572e591b1c453c7340")
+elseif(CUDA_SUPPORT)
   set(ONNX_RUNTIME "Microsoft.ML.OnnxRuntime.Gpu.1.7.1")
   set(PACKAGE_URL "https://www.nuget.org/api/v2/package/Microsoft.ML.OnnxRuntime.Gpu/1.7.1")
   set(PACKAGE_SHA512 "41112118007aae34fcc38100152df6e6fa5fc567e61aa4ded42a26d39751f1be7ec225c0d73799f065015e284f0fb9bd7e0835c733e9abad5b0243a391411f8d")
 else()
-  set(ONNX_RUNTIME "Microsoft.ML.OnnxRuntime.DirectML.1.7.0")
-  set(PACKAGE_URL "https://www.nuget.org/api/v2/package/Microsoft.ML.OnnxRuntime.DirectML/1.7.0")
-  set(PACKAGE_SHA512 "2e5bd2c0ade72444d4efdfbd6a75571aaa72045769f9b5847186129c9e5e667ad080d5d2b9a12cce88c9eee68302be89cdb7030ccefa3d572e591b1c453c7340")
+  set(ONNX_RUNTIME "Microsoft.ML.OnnxRuntime.1.7.0")
+  set(PACKAGE_URL "https://www.nuget.org/api/v2/package/Microsoft.ML.OnnxRuntime/1.7.0")
+  set(PACKAGE_SHA512 "1fc15386bdfa455f457e50899e3c9c454aafbdc345799dcf4ecfd6990a9dbd8cd7f0b1f3bf412c47c900543c535f95aa1cb1e14e9851cb9b600c60a981f38a50")
 endif()
 
 file(DOWNLOAD
@@ -90,7 +102,7 @@ if(MSVC)
 else()
   set(ARCH "linux-x64")
 
-  execute_process(COMMAND unzip "${CMAKE_CURRENT_BINARY_DIR}/onnxruntime.nuget"
+  execute_process(COMMAND unzip -oq "${CMAKE_CURRENT_BINARY_DIR}/onnxruntime.nuget"
     WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${ONNX_RUNTIME}"
   )
 
diff --git a/ros_msft_onnx/src/pose_parser.cpp b/ros_msft_onnx/src/pose_parser.cpp
index 370014b..41076b2 100644
--- a/ros_msft_onnx/src/pose_parser.cpp
+++ b/ros_msft_onnx/src/pose_parser.cpp
@@ -1,10 +1,10 @@
 #include 
-#include 
-#include 
 #include 
-#include 
 #include 
 #include 
+#include 
+#include 
+#include 
 
 #include "ros_msft_onnx/ros_msft_onnx.h"
 #include "ros_msft_onnx/pose_parser.h"
@@ -203,8 +203,8 @@ void PoseProcessor::ProcessOutput(std::vector output, cv::Mat& image)
 
     if (_fake)
     {
-        std::vector<visualization_msgs::Marker> markers;
         visualization_msgs::Marker marker;
+        visualization_msgs::MarkerArray markers;
         marker.header.frame_id = _linkName;
         marker.header.stamp = ros::Time();
         marker.ns = "onnx";
@@ -234,7 +234,7 @@ void PoseProcessor::ProcessOutput(std::vector output, cv::Mat& image)
         marker.color.g = 0.0;
         marker.color.b = 1.0;
 
-        markers.push_back(marker);
+        markers.markers.push_back(marker);
         _detect_pub.publish(markers);
         return;
     }
@@ -284,7 +284,7 @@ void PoseProcessor::ProcessOutput(std::vector output, cv::Mat& image)
         tf::Quaternion poseQuat;
         tfRod.getRotation(poseQuat);
 
-        std::vector<visualization_msgs::Marker> markers;
+        visualization_msgs::MarkerArray markers;
         visualization_msgs::Marker marker;
         double x = tvec.at(0) / 1000.0;
         double y = tvec.at(1) / 1000.0;
@@ -294,7 +294,7 @@ void PoseProcessor::ProcessOutput(std::vector output, cv::Mat& image)
         marker.mesh_resource = meshResource;
         tf::quaternionTFToMsg(poseQuat, marker.pose.orientation);
 
-        markers.push_back(marker);
+        markers.markers.push_back(marker);
 
         ros_msft_onnx_msgs::DetectedObjectPose doPose;
 
@@ -329,7 +329,7 @@ void PoseProcessor::ProcessOutput(std::vector output, cv::Mat& image)
         marker1.color.r = 1.0; marker1.color.g = 0.0; marker1.color.b = 0.0;
         tf::quaternionTFToMsg(poseQuat, marker1.pose.orientation);
 
-        markers.push_back(marker1);
+        markers.markers.push_back(marker1);
 
         visualization_msgs::Marker marker2;
         initMarker(marker2, 2, visualization_msgs::Marker::ARROW, x, y, z);
@@ -341,7 +341,7 @@ void PoseProcessor::ProcessOutput(std::vector output, cv::Mat& image)
         marker2.color.r = 0.0; marker2.color.g = 1.0; marker2.color.b = 0.0;
         tf::quaternionTFToMsg(poseQuat, marker2.pose.orientation);
 
-        markers.push_back(marker2);
+        markers.markers.push_back(marker2);
 
         visualization_msgs::Marker marker3;
         initMarker(marker3, 3, visualization_msgs::Marker::ARROW, x, y, z);
@@ -353,7 +353,7 @@ void PoseProcessor::ProcessOutput(std::vector output, cv::Mat& image)
         marker3.color.r = 0.0; marker3.color.g = 0.0; marker3.color.b = 1.0;
         tf::quaternionTFToMsg(poseQuat, marker3.pose.orientation);
 
-        markers.push_back(marker3);
+        markers.markers.push_back(marker3);
     }
 
     _detect_pub.publish(markers);
diff --git a/ros_msft_onnx/src/yolo_box.cpp b/ros_msft_onnx/src/yolo_box.cpp
index 95974f9..dd07330 100644
--- a/ros_msft_onnx/src/yolo_box.cpp
+++ b/ros_msft_onnx/src/yolo_box.cpp
@@ -1,7 +1,7 @@
 #include 
+#include 
 #include 
 #include 
-#include 
 
 #include "ros_msft_onnx/ros_msft_onnx.h"
 #include "ros_msft_onnx/yolo_box.h"
@@ -77,8 +77,8 @@ namespace yolo
 
         // If we found a person, send a message
         int count = 0;
-        std::vector<visualization_msgs::Marker> markers;
-        for (std::vector::iterator it = boxes.begin(); it != boxes.end(); ++it)
+        visualization_msgs::MarkerArray markers;
+        for(std::vector::iterator it = boxes.begin(); it != boxes.end(); ++it)
         {
             for (auto label : _labels)
             {
@@ -108,7 +108,7 @@ namespace yolo
                 marker.color.g = 0.0;
                 marker.color.b = 1.0;
 
-                markers.push_back(marker);
+                markers.markers.push_back(marker);
 
                 if (_debug)
                 {
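A recurring change across the C++ hunks above is replacing a plain `std::vector` of markers with `visualization_msgs::MarkerArray` and appending to its `markers` field before publishing. The sketch below shows that pattern on its own, as a minimal ROS1 (roscpp) node; the node name, topic, queue size, and frame id are illustrative assumptions, not values taken from the patch.

```cpp
#include <ros/ros.h>
#include <visualization_msgs/Marker.h>
#include <visualization_msgs/MarkerArray.h>

// Minimal sketch only: topic name, frame id, and queue size are assumptions.
int main(int argc, char** argv)
{
    ros::init(argc, argv, "marker_array_example");
    ros::NodeHandle nh;

    // Advertise a MarkerArray publisher, matching the message type published in the patch.
    ros::Publisher detect_pub =
        nh.advertise<visualization_msgs::MarkerArray>("detected_objects", 1);

    visualization_msgs::MarkerArray markers;   // container message
    visualization_msgs::Marker marker;         // one marker to append
    marker.header.frame_id = "camera";         // assumed frame
    marker.header.stamp = ros::Time();
    marker.ns = "onnx";
    marker.id = 0;
    marker.type = visualization_msgs::Marker::SPHERE;
    marker.action = visualization_msgs::Marker::ADD;
    marker.scale.x = marker.scale.y = marker.scale.z = 0.1;
    marker.color.a = 1.0;
    marker.color.b = 1.0;

    // Append to the array's `markers` field, then publish the whole array at once.
    markers.markers.push_back(marker);
    detect_pub.publish(markers);

    ros::spinOnce();
    return 0;
}
```

The `markers` field of `MarkerArray` is its `Marker[]` payload, which is why the patch changes `markers.push_back(marker)` to `markers.markers.push_back(marker)` wherever the container type is switched.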