Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(multiframe-pointpainting): add multi-sweep pointpainting #2124

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,40 @@
</group>
</group>

<!-- PointPainting -->
<!-- Active only when the lidar detector is set to pointpainting; everything below runs in the "pointpainting" namespace. -->
<group if="$(eval &quot;'$(var lidar_detection_model)'=='pointpainting'&quot;)">
<push-ros-namespace namespace="pointpainting"/>
<include file="$(find-pkg-share image_projection_based_fusion)/launch/pointpainting_fusion.launch.xml">
<!-- Per-camera inputs: camera_info comes from launch vars; ROI topics are fixed to the detection namespace. Cameras 0-7 are wired unconditionally; input/rois_number (image_number) tells the node how many are actually used. -->
<arg name="input/camera_info0" value="$(var camera_info0)"/>
<arg name="input/rois0" value="/perception/object_recognition/detection/rois0"/>
<arg name="input/camera_info1" value="$(var camera_info1)"/>
<arg name="input/rois1" value="/perception/object_recognition/detection/rois1"/>
<arg name="input/camera_info2" value="$(var camera_info2)"/>
<arg name="input/rois2" value="/perception/object_recognition/detection/rois2"/>
<arg name="input/camera_info3" value="$(var camera_info3)"/>
<arg name="input/rois3" value="/perception/object_recognition/detection/rois3"/>
<arg name="input/camera_info4" value="$(var camera_info4)"/>
<arg name="input/rois4" value="/perception/object_recognition/detection/rois4"/>
<arg name="input/camera_info5" value="$(var camera_info5)"/>
<arg name="input/rois5" value="/perception/object_recognition/detection/rois5"/>
<arg name="input/camera_info6" value="$(var camera_info6)"/>
<arg name="input/rois6" value="/perception/object_recognition/detection/rois6"/>
<arg name="input/camera_info7" value="$(var camera_info7)"/>
<arg name="input/rois7" value="/perception/object_recognition/detection/rois7"/>
<arg name="input/rois_number" value="$(var image_number)"/>
<!-- Raw image topics, forwarded from the top-level launch vars (used for visualization/debug by the fusion node). -->
<arg name="input/image0" value="$(var image_raw0)"/>
<arg name="input/image1" value="$(var image_raw1)"/>
<arg name="input/image2" value="$(var image_raw2)"/>
<arg name="input/image3" value="$(var image_raw3)"/>
<arg name="input/image4" value="$(var image_raw4)"/>
<arg name="input/image5" value="$(var image_raw5)"/>
<arg name="input/image6" value="$(var image_raw6)"/>
<arg name="input/image7" value="$(var image_raw7)"/>
<!-- Lidar input and the detector output; "objects" resolves under the pointpainting namespace pushed above. -->
<arg name="input/pointcloud" value="$(var input/pointcloud)"/>
<arg name="output/objects" value="objects"/>
</include>
</group>

<!-- Validator -->
<group>
<let name="validator/input/obstacle_pointcloud" value="/perception/object_recognition/detection/pointcloud_map_filtered/pointcloud" if="$(var use_pointcloud_map)"/>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

<!-- lidar param -->
<arg name="input/pointcloud"/>
<arg name="lidar_detection_model" default="centerpoint" description="options: `centerpoint`, `apollo`"/>
<arg name="lidar_detection_model" default="centerpoint" description="options: `centerpoint`, `apollo`, `pointpainting`"/>
<arg name="use_pointcloud_map" default="true" description="use pointcloud map in detection"/>
<arg name="use_object_filter" default="true" description="use object filter"/>
<arg name="use_pointcloud_container" default="false" description="use pointcloud container for detection preprocessor"/>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
<arg name="input/pointcloud" default="/sensing/lidar/concatenated/pointcloud" description="The topic will be used in the detection module"/>
<arg name="vehicle_param_file" default="$(find-pkg-share vehicle_info_util)/config/vehicle_info.param.yaml" description="path to the file of vehicle info yaml"/>
<arg name="mode" default="camera_lidar_fusion" description="options: `camera_lidar_radar_fusion`, `camera_lidar_fusion`, `lidar_radar_fusion`, `lidar` or `radar`"/>
<arg name="lidar_detection_model" default="centerpoint" description="options: `centerpoint`, `apollo`"/>
<arg name="lidar_detection_model" default="centerpoint" description="options: `centerpoint`, `apollo`, `pointpainting`"/>
<arg name="image_raw0" default="/sensing/camera/camera0/image_rect_color" description="image raw topic name"/>
<arg name="camera_info0" default="/sensing/camera/camera0/camera_info" description="camera info topic name"/>
<arg name="image_raw1" default="/sensing/camera/camera1/image_rect_color"/>
Expand Down
15 changes: 5 additions & 10 deletions perception/image_projection_based_fusion/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -117,15 +117,15 @@ if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
else()
message(STATUS "diff ${FILE_NAME}")
message(STATUS "File hash changes. Downloading now ...")
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v1/${FILE_NAME}
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v2/${FILE_NAME}
${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
endif()
else()
message(STATUS "not found ${FILE_NAME}")
message(STATUS "File doesn't exists. Downloading now ...")
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v1/${FILE_NAME}
file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/pointpainting/v2/${FILE_NAME}
${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
Expand All @@ -138,8 +138,8 @@ if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
endfunction()

# default model
download(pts_voxel_encoder_pointpainting.onnx 795a919537b71faae706b8c71312d31a)
download(pts_backbone_neck_head_pointpainting.onnx edb48a74659820904071ea8050413c23)
download(pts_voxel_encoder_pointpainting.onnx 438dfecd962631ec8f011e0dfa2c6160)
download(pts_backbone_neck_head_pointpainting.onnx e590a0b2bdcd35e01340cf4521bf149e)
find_package(OpenCV REQUIRED)
find_package(Eigen3 REQUIRED)

Expand Down Expand Up @@ -173,15 +173,10 @@ if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
)

rclcpp_components_register_node(pointpainting_lib
PLUGIN "image_projection_based_fusion::PointpaintingFusionNode"
PLUGIN "image_projection_based_fusion::PointPaintingFusionNode"
EXECUTABLE pointpainting_fusion_node
)

install(
TARGETS pointpainting_cuda_lib
DESTINATION lib
)

else()
message("Skipping build of some nodes due to missing dependencies")
endif()
Expand Down
4 changes: 2 additions & 2 deletions perception/image_projection_based_fusion/config/pointpainting.param.yaml
100644 → 100755
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
/**:
ros__parameters:
class_names: ["CAR", "PEDESTRIAN", "BICYCLE"]
point_feature_size: 6 # x, y, z and car, pedestrian, bicycle
point_feature_size: 7 # x, y, z, timelag and car, pedestrian, bicycle
max_voxel_size: 40000
point_cloud_range: [-76.8, -76.8, -3.0, 76.8, 76.8, 5.0]
voxel_size: [0.32, 0.32, 8.0]
downsample_factor: 1
encoder_in_feature_size: 11
encoder_in_feature_size: 12
yaw_norm_thresholds: [0.3, 0.0, 0.3]
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ Example:

## References/External links

[1] Vora, Sourabh, et al. "Pointpainting: Sequential fusion for 3d object detection." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020.
[1] Vora, Sourabh, et al. "PointPainting: Sequential fusion for 3d object detection." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2020.

[2] [CVPR'20 Workshop on Scalability in Autonomous Driving] Waymo Open Dataset Challenge: <https://youtu.be/9g9GsI33ol8?t=535>
Ding, Zhuangzhuang, et al. "1st Place Solution for Waymo Open Dataset Challenge--3D Detection and Domain Adaptation." arXiv preprint arXiv:2006.15505 (2020).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,10 +32,10 @@ namespace image_projection_based_fusion
{
using Label = autoware_auto_perception_msgs::msg::ObjectClassification;

class PointpaintingFusionNode : public FusionNode<sensor_msgs::msg::PointCloud2, DetectedObjects>
class PointPaintingFusionNode : public FusionNode<sensor_msgs::msg::PointCloud2, DetectedObjects>
{
public:
explicit PointpaintingFusionNode(const rclcpp::NodeOptions & options);
explicit PointPaintingFusionNode(const rclcpp::NodeOptions & options);

protected:
void preprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@
<remap from="~/output/objects" to="$(var output/objects)"/>
<param name="score_threshold" value="0.45"/>
<param name="densification_world_frame_id" value="map"/>
<param name="densification_num_past_frames" value="0"/>
<param name="densification_num_past_frames" value="2"/>
<param name="trt_precision" value="fp16"/>
<param name="encoder_onnx_path" value="$(var model_path)/pts_voxel_encoder_$(var model_name).onnx"/>
<param name="encoder_engine_path" value="$(var model_path)/pts_voxel_encoder_$(var model_name).engine"/>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
namespace image_projection_based_fusion
{

PointpaintingFusionNode::PointpaintingFusionNode(const rclcpp::NodeOptions & options)
PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & options)
: FusionNode<sensor_msgs::msg::PointCloud2, DetectedObjects>("pointpainting_fusion", options)
{
const float score_threshold =
Expand Down Expand Up @@ -84,7 +84,7 @@ PointpaintingFusionNode::PointpaintingFusionNode(const rclcpp::NodeOptions & opt
obj_pub_ptr_ = this->create_publisher<DetectedObjects>("~/output/objects", rclcpp::QoS{1});
}

void PointpaintingFusionNode::preprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg)
void PointPaintingFusionNode::preprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg)
{
sensor_msgs::msg::PointCloud2 tmp;
tmp = painted_pointcloud_msg;
Expand Down Expand Up @@ -131,7 +131,7 @@ void PointpaintingFusionNode::preprocess(sensor_msgs::msg::PointCloud2 & painted
static_cast<uint32_t>(painted_pointcloud_msg.data.size() / painted_pointcloud_msg.height);
}

void PointpaintingFusionNode::fuseOnSingleImage(
void PointPaintingFusionNode::fuseOnSingleImage(
__attribute__((unused)) const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg,
const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg,
const sensor_msgs::msg::CameraInfo & camera_info,
Expand Down Expand Up @@ -231,7 +231,7 @@ void PointpaintingFusionNode::fuseOnSingleImage(
}
}

void PointpaintingFusionNode::postprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg)
void PointPaintingFusionNode::postprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg)
{
std::vector<centerpoint::Box3D> det_boxes3d;
bool is_success = detector_ptr_->detect(painted_pointcloud_msg, tf_buffer_, det_boxes3d);
Expand All @@ -255,12 +255,12 @@ void PointpaintingFusionNode::postprocess(sensor_msgs::msg::PointCloud2 & painte
obj_pub_ptr_->publish(output_obj_msg);
}

bool PointpaintingFusionNode::out_of_scope(__attribute__((unused)) const DetectedObjects & obj)
bool PointPaintingFusionNode::out_of_scope(__attribute__((unused)) const DetectedObjects & obj)
{
return false;
}

} // namespace image_projection_based_fusion

#include <rclcpp_components/register_node_macro.hpp>
RCLCPP_COMPONENTS_REGISTER_NODE(image_projection_based_fusion::PointpaintingFusionNode)
RCLCPP_COMPONENTS_REGISTER_NODE(image_projection_based_fusion::PointPaintingFusionNode)
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ namespace
{
const std::size_t MAX_POINT_IN_VOXEL_SIZE = 32; // the same as max_point_in_voxel_size_ in config
const std::size_t WARPS_PER_BLOCK = 4;
const std::size_t ENCODER_IN_FEATURE_SIZE = 11; // same as encoder_in_feature_size_ in config.hpp
const std::size_t ENCODER_IN_FEATURE_SIZE = 12; // same as encoder_in_feature_size_ in config.hpp

std::size_t divup(const std::size_t a, const std::size_t b)
{
Expand Down Expand Up @@ -70,7 +70,7 @@ __global__ void generateFeatures_kernel(
if (pillar_idx >= num_voxels) return;

// load src
const int POINT_FEATURE_SIZE = 6;
const int POINT_FEATURE_SIZE = 7;
__shared__ float pillarSM[WARPS_PER_BLOCK][MAX_POINT_IN_VOXEL_SIZE][POINT_FEATURE_SIZE];
__shared__ float3 pillarSumSM[WARPS_PER_BLOCK];
__shared__ int3 cordsSM[WARPS_PER_BLOCK];
Expand Down Expand Up @@ -129,14 +129,15 @@ __global__ void generateFeatures_kernel(
pillarOutSM[pillar_idx_inBlock][point_idx][3] = pillarSM[pillar_idx_inBlock][point_idx][3];
pillarOutSM[pillar_idx_inBlock][point_idx][4] = pillarSM[pillar_idx_inBlock][point_idx][4];
pillarOutSM[pillar_idx_inBlock][point_idx][5] = pillarSM[pillar_idx_inBlock][point_idx][5];
pillarOutSM[pillar_idx_inBlock][point_idx][6] = pillarSM[pillar_idx_inBlock][point_idx][6];

// change index
pillarOutSM[pillar_idx_inBlock][point_idx][6] = mean.x;
pillarOutSM[pillar_idx_inBlock][point_idx][7] = mean.y;
pillarOutSM[pillar_idx_inBlock][point_idx][8] = mean.z;
pillarOutSM[pillar_idx_inBlock][point_idx][7] = mean.x;
pillarOutSM[pillar_idx_inBlock][point_idx][8] = mean.y;
pillarOutSM[pillar_idx_inBlock][point_idx][9] = mean.z;

pillarOutSM[pillar_idx_inBlock][point_idx][9] = center.x;
pillarOutSM[pillar_idx_inBlock][point_idx][10] = center.y;
pillarOutSM[pillar_idx_inBlock][point_idx][10] = center.x;
pillarOutSM[pillar_idx_inBlock][point_idx][11] = center.y;

} else {
pillarOutSM[pillar_idx_inBlock][point_idx][0] = 0;
Expand All @@ -152,6 +153,7 @@ __global__ void generateFeatures_kernel(
pillarOutSM[pillar_idx_inBlock][point_idx][8] = 0;
pillarOutSM[pillar_idx_inBlock][point_idx][9] = 0;
pillarOutSM[pillar_idx_inBlock][point_idx][10] = 0;
pillarOutSM[pillar_idx_inBlock][point_idx][11] = 0;
}

__syncthreads();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,19 +44,25 @@ std::size_t VoxelGenerator::pointsToVoxels(
for (auto pc_cache_iter = pd_ptr_->getPointCloudCacheIter(); !pd_ptr_->isCacheEnd(pc_cache_iter);
pc_cache_iter++) {
auto pc_msg = pc_cache_iter->pointcloud_msg;
auto affine_past2current =
pd_ptr_->getAffineWorldToCurrent() * pc_cache_iter->affine_past2world;
float timelag = static_cast<float>(
pd_ptr_->getCurrentTimestamp() - rclcpp::Time(pc_msg.header.stamp).seconds());

for (sensor_msgs::PointCloud2ConstIterator<float> x_iter(pc_msg, "x"), y_iter(pc_msg, "y"),
z_iter(pc_msg, "z"), car_iter(pc_msg, "CAR"), ped_iter(pc_msg, "PEDESTRIAN"),
bic_iter(pc_msg, "BICYCLE");
x_iter != x_iter.end(); ++x_iter, ++y_iter, ++z_iter, ++car_iter, ++ped_iter, ++bic_iter) {
point_current << *x_iter, *y_iter, *z_iter;
point_past << *x_iter, *y_iter, *z_iter;
point_current = affine_past2current * point_past;

point[0] = point_current.x();
point[1] = point_current.y();
point[2] = point_current.z();
point[3] = *car_iter;
point[4] = *ped_iter;
point[5] = *bic_iter;
point[3] = timelag;
point[4] = *car_iter;
point[5] = *ped_iter;
point[6] = *bic_iter;

out_of_range = false;
for (std::size_t di = 0; di < config_.point_dim_size_; di++) {
Expand Down