Skip to content

Upgrade code to Humble #9

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
build/*
log/*
install/*
23 changes: 16 additions & 7 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,9 @@ if(NOT CMAKE_C_STANDARD)
set(CMAKE_C_STANDARD 99)
endif()

# Default to C++14
# Default to C++17
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD 17)
endif()

if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
Expand All @@ -42,15 +42,22 @@ find_package(rclcpp REQUIRED)
find_package(std_msgs REQUIRED)
find_package(vision_msgs REQUIRED)
find_package(geometry_msgs REQUIRED)
find_package(sensor_msgs REQUIRED)
find_package(tf2_ros REQUIRED)
find_package(tf2_geometry_msgs REQUIRED)
find_package(message_filters REQUIRED)
find_package(PCL REQUIRED)
find_package(pcl_conversions REQUIRED)
find_package(CUDA REQUIRED)

set(CUDA_VERSION 11.3)

# change here according to your version
set(CUDA_VERSION 11.6)
set(CUDA_TOOLKIT_ROOT_DIR /usr/local/cuda-${CUDA_VERSION})

SET(CMAKE_BUILD_TYPE "Release")
add_compile_options(-W)
add_compile_options(-std=c++11)
# add_compile_options(-std=c++17)

set(SMS 50 52 53 60 61 62 70 72 75 80 86)
foreach(sm ${SMS})
Expand All @@ -70,12 +77,14 @@ set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS}
-Xlinker -L/usr/${ARCH}-linux-gnu/lib/
)

set(TENSORRT_INCLUDE_DIRS /usr/include/${ARCH}-linux-gnu/)
set(TENSORRT_LIBRARY_DIRS /usr/lib/${ARCH}-linux-gnu/)
set(TENSORRT_INCLUDE_DIRS ~/TensorRT/include/)
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These paths should not be hard-coded.

set(TENSORRT_ONNX_INCLUDE_DIRS ~/TensorRT/parsers/onnx/)
set(TENSORRT_LIBRARY_DIRS ~/TensorRT-8.4.1.5/lib/)

include_directories(
${CUDA_INCLUDE_DIRS}
${TENSORRT_INCLUDE_DIRS}
${TENSORRT_ONNX_INCLUDE_DIRS}
../include/
)

Expand All @@ -92,7 +101,7 @@ file(GLOB_RECURSE SOURCE_FILES


cuda_add_executable(${PROJECT_NAME} src/pp_inference_node.cpp ${SOURCE_FILES})
ament_target_dependencies(${PROJECT_NAME} rclcpp std_msgs vision_msgs geometry_msgs PCL CUDA)
ament_target_dependencies(${PROJECT_NAME} rclcpp std_msgs vision_msgs geometry_msgs sensor_msgs message_filters tf2_geometry_msgs tf2_ros pcl_conversions PCL CUDA)

target_link_libraries(${PROJECT_NAME}
libnvinfer.so
Expand Down
19 changes: 13 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,14 @@ Node details:
</p>

## Requirements
Tested on Ubuntu 20.04 and ROS2 Foxy.
Tested on Ubuntu 22.04 and ROS2 Humble.
- pcl library
- TensorRT 8.2 (or above)
- TensorRT OSS 22.02 (see how to install below)
- TensorRT OSS 22.02 (or above)
```
sudo apt install libpcl-dev
```

```
git clone -b 22.02 https://github.com/NVIDIA/TensorRT.git TensorRT
cd TensorRT
Expand All @@ -31,9 +36,11 @@ cp libnvinfer_plugin.so.8.2.* /usr/lib/$ARCH-linux-gnu/libnvinfer_plugin.so.8.2.
cp libnvinfer_plugin_static.a /usr/lib/$ARCH-linux-gnu/libnvinfer_plugin_static.a
```
- [TAO Converter](https://docs.nvidia.com/tao/tao-toolkit/text/tensorrt.html#installing-the-tao-converter)
- [ROS2 Foxy](https://docs.ros.org/en/foxy/Installation.html)
- [ROS2 Humble](https://docs.ros.org/en/humble/Installation.html)

## Usage
### CUDA version
Edit the `CUDA_VERSION` variable in `CMakeLists.txt` to match the CUDA toolkit version installed on your system.

1. This project assumes that you have already trained your model using NVIDIA TAO Toolkit and have an **.etlt** file. If not, please refer [here](https://docs.nvidia.com/tao/tao-toolkit/text/point_cloud/index.html) for information on how to do this. The pre-trained PointPillars model used by this project can be found [here](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/tao/models/pointpillarnet/files).
2. Use **tao-converter** to generate a TensorRT engine from your model. For instance:
Expand All @@ -54,8 +61,8 @@ Argument definitions:
- -t: Desired engine data type. The options are fp32 or fp16 (default value is fp32).

3. Source your ROS2 environment:
`source /opt/ros/foxy/setup.bash`
4. Create a ROS2 workspace (more information can be found [here](https://docs.ros.org/en/foxy/Tutorials/Beginner-Client-Libraries/Creating-A-Workspace/Creating-A-Workspace.html)):
`source /opt/ros/humble/setup.bash`
4. Create a ROS2 workspace (more information can be found [here](https://docs.ros.org/en/humble/Tutorials/Beginner-Client-Libraries/Creating-A-Workspace/Creating-A-Workspace.html)):
```
mkdir -p pointpillars_ws/src
cd pointpillars_ws/src
Expand All @@ -73,7 +80,7 @@ Clone this repository in `pointpillars_ws/src`. The directory structure should l
```
5. Resolve missing dependencies by running the following command from `pointpillars_ws`:

`rosdep install -i --from-path src --rosdistro foxy -y`
`rosdep install -i --from-path src --rosdistro humble -y`

6. Specify parameters including the path to your TensorRT engine in the launch file. Please see [Modifying parameters in the launch file](https://github.com/NVIDIA-AI-IOT/ros2_tao_pointpillars#modifying-parameters-in-the-launch-file) below for how to do this.

Expand Down
2 changes: 1 addition & 1 deletion include/pp_infer/pointpillar.h
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ class PointPillar {
~PointPillar(void);
int getPointSize();
std::vector<Bndbox> doinfer(
void*points_data,
void* points_data,
unsigned int* points_size,
std::vector<Bndbox> &nms_pred,
float nms_iou_thresh,
Expand Down
15 changes: 9 additions & 6 deletions src/pp_inference_node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@
#include "sensor_msgs/msg/point_cloud2.hpp"
#include "vision_msgs/msg/detection3_d_array.hpp"
#include "pcl_conversions/pcl_conversions.h"
#include "../include/pp_infer/point_cloud2_iterator.hpp"
#include "sensor_msgs/point_cloud2_iterator.hpp"
#include <tf2/LinearMath/Quaternion.h>
#include <tf2_geometry_msgs/tf2_geometry_msgs.h>

Expand Down Expand Up @@ -73,7 +73,7 @@ class MinimalPublisher : public rclcpp::Node
MinimalPublisher()
: Node("minimal_publisher")
{
this->declare_parameter("class_names");
this->declare_parameter<std::vector<std::string>>("class_names");
this->declare_parameter<float>("nms_iou_thresh", 0.01);
this->declare_parameter<int>("pre_nms_top_n", 4096);
this->declare_parameter<std::string>("model_path", "");
Expand All @@ -94,10 +94,13 @@ class MinimalPublisher : public rclcpp::Node
cudaStream_t stream = NULL;
pointpillar = new PointPillar(model_path, engine_path, stream, data_type);

publisher_ = this->create_publisher<vision_msgs::msg::Detection3DArray>("bbox", 700);
publisher_ = this->create_publisher<vision_msgs::msg::Detection3DArray>("bbox", 1);

//QoS: sensor profile
static const rclcpp::QoS sensor_qos_profile = rclcpp::QoS(5).best_effort().durability_volatile();

subscription_ = this->create_subscription<sensor_msgs::msg::PointCloud2>(
"/point_cloud", 700, std::bind(&MinimalPublisher::topic_callback, this, _1));
"/point_cloud", sensor_qos_profile, std::bind(&MinimalPublisher::topic_callback, this, _1));

}

Expand Down Expand Up @@ -196,8 +199,8 @@ class MinimalPublisher : public rclcpp::Node

detection.bbox.center.orientation = orientation;

hyp.id = std::to_string(nms_pred[i].id);
hyp.score = nms_pred[i].score;
hyp.hypothesis.class_id = std::to_string(nms_pred[i].id);
hyp.hypothesis.score = nms_pred[i].score;

detection.header = msg->header;

Expand Down