support color and depth/ir formats #2840

Merged
22 changes: 13 additions & 9 deletions realsense2_camera/include/base_realsense_node.h
@@ -205,24 +205,23 @@ namespace realsense2_camera
void publishDynamicTransforms();
void publishPointCloud(rs2::points f, const rclcpp::Time& t, const rs2::frameset& frameset);
Extrinsics rsExtrinsicsToMsg(const rs2_extrinsics& extrinsics) const;

IMUInfo getImuInfo(const rs2::stream_profile& profile);

void fillMessageImage(
void initializeFormatsMaps();

bool fillROSImageMsgAndReturnStatus(
const cv::Mat& cv_matrix_image,
const stream_index_pair& stream,
unsigned int width,
unsigned int height,
unsigned int bpp,
const rs2_format& stream_format,
const rclcpp::Time& t,
sensor_msgs::msg::Image* img_msg_ptr);

cv::Mat& getCVMatImage(
bool fillCVMatImageAndReturnStatus(
rs2::frame& frame,
std::map<stream_index_pair, cv::Mat>& images,
unsigned int width,
unsigned int height,
unsigned int bpp,
const stream_index_pair& stream);

void publishFrame(
@@ -234,7 +233,12 @@ namespace realsense2_camera
const std::map<stream_index_pair, std::shared_ptr<image_publisher>>& image_publishers,
const bool is_publishMetadata = true);

void publishRGBD(const cv::Mat& rgb_cv_matrix, const cv::Mat& depth_cv_matrix, const rclcpp::Time& t);
void publishRGBD(
const cv::Mat& rgb_cv_matrix,
const rs2_format& color_format,
const cv::Mat& depth_cv_matrix,
const rs2_format& depth_format,
const rclcpp::Time& t);

void publishMetadata(rs2::frame f, const rclcpp::Time& header_time, const std::string& frame_id);

@@ -293,14 +297,14 @@ namespace realsense2_camera
std::map<stream_index_pair, rclcpp::Publisher<sensor_msgs::msg::Imu>::SharedPtr> _imu_publishers;
std::shared_ptr<rclcpp::Publisher<nav_msgs::msg::Odometry>> _odom_publisher;
std::shared_ptr<SyncedImuPublisher> _synced_imu_publisher;
std::map<unsigned int, int> _image_formats;
std::map<stream_index_pair, rclcpp::Publisher<sensor_msgs::msg::CameraInfo>::SharedPtr> _info_publishers;
std::map<stream_index_pair, rclcpp::Publisher<realsense2_camera_msgs::msg::Metadata>::SharedPtr> _metadata_publishers;
std::map<stream_index_pair, rclcpp::Publisher<IMUInfo>::SharedPtr> _imu_info_publishers;
std::map<stream_index_pair, rclcpp::Publisher<Extrinsics>::SharedPtr> _extrinsics_publishers;
rclcpp::Publisher<realsense2_camera_msgs::msg::RGBD>::SharedPtr _rgbd_publisher;
std::map<stream_index_pair, cv::Mat> _images;
std::map<unsigned int, std::string> _encoding;
std::map<rs2_format, std::string> _rs_format_to_ros_format;
std::map<rs2_format, int> _rs_format_to_cv_format;

std::map<stream_index_pair, sensor_msgs::msg::CameraInfo> _camera_info;
std::atomic_bool _is_initialized_time_base;
160 changes: 124 additions & 36 deletions realsense2_camera/src/base_realsense_node.cpp
@@ -123,13 +123,7 @@ BaseRealSenseNode::BaseRealSenseNode(rclcpp::Node& node,
ROS_INFO("Intra-Process communication enabled");
}

_image_formats[1] = CV_8UC1; // CVBridge type
_image_formats[2] = CV_16UC1; // CVBridge type
_image_formats[3] = CV_8UC3; // CVBridge type
_encoding[1] = sensor_msgs::image_encodings::MONO8; // ROS message type
_encoding[2] = sensor_msgs::image_encodings::TYPE_16UC1; // ROS message type
_encoding[3] = sensor_msgs::image_encodings::RGB8; // ROS message type

initializeFormatsMaps();
_monitor_options = {RS2_OPTION_ASIC_TEMPERATURE, RS2_OPTION_PROJECTOR_TEMPERATURE};
}

@@ -171,6 +165,47 @@ void BaseRealSenseNode::publishTopics()
ROS_INFO_STREAM("RealSense Node Is Up!");
}

void BaseRealSenseNode::initializeFormatsMaps()
{
// from rs2_format to OpenCV format
// https://docs.opencv.org/3.4/d1/d1b/group__core__hal__interface.html
// https://docs.opencv.org/2.4/modules/core/doc/basic_structures.html
// CV_<bit-depth>{U|S|F}C(<number_of_channels>)
// where U is unsigned integer type, S is signed integer type, and F is float type.
// For example, CV_8UC1 means an 8-bit single-channel array,
// CV_32FC2 means a 2-channel (complex) floating-point array, and so on.
_rs_format_to_cv_format[RS2_FORMAT_Y8] = CV_8UC1;
_rs_format_to_cv_format[RS2_FORMAT_Y16] = CV_16UC1;
_rs_format_to_cv_format[RS2_FORMAT_Z16] = CV_16UC1;
_rs_format_to_cv_format[RS2_FORMAT_RGB8] = CV_8UC3;
_rs_format_to_cv_format[RS2_FORMAT_BGR8] = CV_8UC3;
_rs_format_to_cv_format[RS2_FORMAT_RGBA8] = CV_8UC4;
_rs_format_to_cv_format[RS2_FORMAT_BGRA8] = CV_8UC4;
_rs_format_to_cv_format[RS2_FORMAT_YUYV] = CV_8UC2;
_rs_format_to_cv_format[RS2_FORMAT_UYVY] = CV_8UC2;
// _rs_format_to_cv_format[RS2_FORMAT_M420] = not supported yet in ROS2
_rs_format_to_cv_format[RS2_FORMAT_RAW8] = CV_8UC1;
_rs_format_to_cv_format[RS2_FORMAT_RAW10] = CV_16UC1;
_rs_format_to_cv_format[RS2_FORMAT_RAW16] = CV_16UC1;

// from rs2_format to ROS2 image msg encoding (format)
// http://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/Image.html
// http://docs.ros.org/en/jade/api/sensor_msgs/html/image__encodings_8h_source.html
_rs_format_to_ros_format[RS2_FORMAT_Y8] = sensor_msgs::image_encodings::MONO8;
_rs_format_to_ros_format[RS2_FORMAT_Y16] = sensor_msgs::image_encodings::MONO16;
_rs_format_to_ros_format[RS2_FORMAT_Z16] = sensor_msgs::image_encodings::TYPE_16UC1;
_rs_format_to_ros_format[RS2_FORMAT_RGB8] = sensor_msgs::image_encodings::RGB8;
_rs_format_to_ros_format[RS2_FORMAT_BGR8] = sensor_msgs::image_encodings::BGR8;
_rs_format_to_ros_format[RS2_FORMAT_RGBA8] = sensor_msgs::image_encodings::RGBA8;
_rs_format_to_ros_format[RS2_FORMAT_BGRA8] = sensor_msgs::image_encodings::BGRA8;
_rs_format_to_ros_format[RS2_FORMAT_YUYV] = sensor_msgs::image_encodings::YUV422_YUY2;
_rs_format_to_ros_format[RS2_FORMAT_UYVY] = sensor_msgs::image_encodings::YUV422;
// _rs_format_to_ros_format[RS2_FORMAT_M420] = not supported yet in ROS2
_rs_format_to_ros_format[RS2_FORMAT_RAW8] = sensor_msgs::image_encodings::TYPE_8UC1;
_rs_format_to_ros_format[RS2_FORMAT_RAW10] = sensor_msgs::image_encodings::TYPE_16UC1;
_rs_format_to_ros_format[RS2_FORMAT_RAW16] = sensor_msgs::image_encodings::TYPE_16UC1;
}
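For reference, a minimal illustration (not part of this PR) of the CV_<bit-depth>{U|S|F}C<channels> naming described in the comment above, using a hypothetical color frame mapped to CV_8UC3:

    cv::Mat rgb(480, 640, CV_8UC3);   // 8-bit unsigned, 3 channels (e.g. RS2_FORMAT_RGB8)
    // rgb.depth()    == CV_8U        -> element bit depth / type
    // rgb.channels() == 3
    // rgb.elemSize() == 3            -> bytes per pixel (used below for the ROS image step)
    // CV_MAKETYPE(rgb.depth(), rgb.channels()) == CV_8UC3, the comparison used in this changeset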

void BaseRealSenseNode::setupFilters()
{
_filters.push_back(std::make_shared<NamedFilter>(std::make_shared<rs2::decimation_filter>(), _parameters, _logger));
@@ -220,7 +255,7 @@ cv::Mat& BaseRealSenseNode::fix_depth_scale(const cv::Mat& from_image, cv::Mat&
to_image.create(from_image.rows, from_image.cols, from_image.type());
}

CV_Assert(from_image.depth() == _image_formats[2]);
CV_Assert(CV_MAKETYPE(from_image.depth(),from_image.channels()) == _rs_format_to_cv_format[RS2_FORMAT_Z16]);

int nRows = from_image.rows;
int nCols = from_image.cols;
@@ -555,7 +590,9 @@ void BaseRealSenseNode::frame_callback(rs2::frame frame)
// On this line we already know original_depth_frame is valid.
if(_enable_rgbd && original_color_frame)
{
publishRGBD(_images[COLOR], _depth_aligned_image[COLOR], t);
auto color_format = original_color_frame.get_profile().format();
auto depth_format = original_depth_frame.get_profile().format();
publishRGBD(_images[COLOR], color_format, _depth_aligned_image[COLOR], depth_format, t);
}
}
}
@@ -825,39 +862,62 @@ IMUInfo BaseRealSenseNode::getImuInfo(const rs2::stream_profile& profile)
return info;
}

void BaseRealSenseNode::fillMessageImage(
bool BaseRealSenseNode::fillROSImageMsgAndReturnStatus(
const cv::Mat& cv_matrix_image,
const stream_index_pair& stream,
unsigned int width,
unsigned int height,
unsigned int bpp,
const rs2_format& stream_format,
const rclcpp::Time& t,
sensor_msgs::msg::Image* img_msg_ptr)
{
if (cv_matrix_image.empty())
{
ROS_ERROR_STREAM("cv::Mat is empty. Ignoring this frame.");
return false;
}
else if (_rs_format_to_ros_format.find(stream_format) == _rs_format_to_ros_format.end())
{
ROS_ERROR_STREAM("Format " << rs2_format_to_string(stream_format) << " is not supported in ROS2 image messages"
<< "Please try different format of this stream.");
return false;
}
// Convert the CV::Mat into a ROS image message (1 copy is done here)
cv_bridge::CvImage(std_msgs::msg::Header(), _encoding.at(bpp), cv_matrix_image).toImageMsg(*img_msg_ptr);
cv_bridge::CvImage(std_msgs::msg::Header(), _rs_format_to_ros_format[stream_format], cv_matrix_image).toImageMsg(*img_msg_ptr);

// Convert OpenCV Mat to ROS Image
img_msg_ptr->header.frame_id = OPTICAL_FRAME_ID(stream);
img_msg_ptr->header.stamp = t;
img_msg_ptr->height = height;
img_msg_ptr->width = width;
img_msg_ptr->is_bigendian = false;
img_msg_ptr->step = width * bpp;
img_msg_ptr->step = width * cv_matrix_image.elemSize();

return true;
}
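A rough usage sketch of the new helper (illustrative only; it assumes `image` is a cv::Mat already filled with RGB8 data and `t` is a valid rclcpp::Time):

    sensor_msgs::msg::Image msg;
    if (fillROSImageMsgAndReturnStatus(image, COLOR, image.cols, image.rows, RS2_FORMAT_RGB8, t, &msg))
    {
        // RGB8 maps to CV_8UC3, so elemSize() is 3 and msg.step == width * 3 bytes per row
    }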

cv::Mat& BaseRealSenseNode::getCVMatImage(
bool BaseRealSenseNode::fillCVMatImageAndReturnStatus(
rs2::frame& frame,
std::map<stream_index_pair, cv::Mat>& images,
unsigned int width,
unsigned int height,
unsigned int bpp,
const stream_index_pair& stream)
{
auto& image = images[stream];
if (image.size() != cv::Size(width, height) || image.depth() != _image_formats[bpp])
auto stream_format = frame.get_profile().format();

if (_rs_format_to_cv_format.find(stream_format) == _rs_format_to_cv_format.end())
{
ROS_ERROR_STREAM("Format " << rs2_format_to_string(stream_format) << " is not supported in realsense2_camera node."
<< "\nPlease try different format of this stream.");
return false;
}
// We try to reduce image creation as much as we can, so we check whether the same image structure
// was already created before, and then fill that image with the frame data.
// image.create() should be called once per <stream>_<profile>_<format>
if (image.size() != cv::Size(width, height) || CV_MAKETYPE(image.depth(), image.channels()) != _rs_format_to_cv_format[stream_format])
{
image.create(height, width, _image_formats[bpp]);
image.create(height, width, _rs_format_to_cv_format[stream_format]);
}

image.data = (uint8_t*)frame.get_data();
@@ -867,8 +927,7 @@ cv::Mat& BaseRealSenseNode::getCVMatImage(
image = fix_depth_scale(image, _depth_scaled_image[stream]);
}

return image;

return true;
}
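For context, a small sketch of the reuse pattern the comment above describes (assuming `frame` is a valid rs2::frame whose data matches the chosen size and type): cv::Mat::create() only reallocates when the size or type changes, and assigning `data` wraps the librealsense buffer without copying.

    const int width = 640, height = 480;         // hypothetical profile size, for illustration
    cv::Mat image;
    image.create(height, width, CV_16UC1);        // allocates once for this size/type
    image.create(height, width, CV_16UC1);        // no-op: same size and type, nothing reallocated
    image.data = (uint8_t*)frame.get_data();      // zero-copy: the Mat now points at the frame buffer
    // The Mat does not own that buffer, so it must not be used after the frame's data is released.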

void BaseRealSenseNode::publishFrame(
@@ -883,13 +942,18 @@ void BaseRealSenseNode::publishFrame(
ROS_DEBUG("publishFrame(...)");
unsigned int width = 0;
unsigned int height = 0;
unsigned int bpp = 1;
auto stream_format = RS2_FORMAT_ANY;
if (f.is<rs2::video_frame>())
{
auto timage = f.as<rs2::video_frame>();
width = timage.get_width();
height = timage.get_height();
bpp = timage.get_bytes_per_pixel();
stream_format = timage.get_profile().format();

Review conversation on the line above:

Why will this produce different results than line 933?

SamerKhshiboun (Collaborator, Author): I think I tested this with some flow; I will verify this again tomorrow.

SamerKhshiboun (Collaborator, Author), Aug 15, 2023: Fixed, and an error is now printed if the check fails.

}
else
{
ROS_ERROR("f.is<rs2::video_frame>() check failed. Frame was dropped.");
return;
}

// Publish stream image
@@ -901,33 +965,43 @@ void BaseRealSenseNode::publishFrame(
// if rgbd has subscribers we fetch the CV image here
if (_rgbd_publisher && 0 != _rgbd_publisher->get_subscription_count())
{
image_cv_matrix = getCVMatImage(f, images, width, height, bpp, stream);
if (fillCVMatImageAndReturnStatus(f, images, width, height, stream))
{
image_cv_matrix = images[stream];
}
}

// if depth/color has subscribers, ask first if rgbd already fetched
// the images from the frame. if not, fetch the relevant color/depth image.
if (0 != image_publisher->get_subscription_count())
{
if(image_cv_matrix.empty())
if (image_cv_matrix.empty() && fillCVMatImageAndReturnStatus(f, images, width, height, stream))
{
image_cv_matrix = getCVMatImage(f, images, width, height, bpp, stream);
image_cv_matrix = images[stream];
}

// Prepare image topic to be published
// We use UniquePtr to allow intra-process publishing when subscribers of that type are available
sensor_msgs::msg::Image::UniquePtr img_msg_ptr(new sensor_msgs::msg::Image());
fillMessageImage(image_cv_matrix, stream, width, height, bpp, t, img_msg_ptr.get());
if (!img_msg_ptr)
{
ROS_ERROR("sensor image message allocation failed, frame was dropped");
ROS_ERROR("Sensor image message allocation failed. Frame was dropped.");
return;
}

// Transfer the unique pointer ownership to the RMW
sensor_msgs::msg::Image *msg_address = img_msg_ptr.get();
image_publisher->publish(std::move(img_msg_ptr));

ROS_DEBUG_STREAM(rs2_stream_to_string(f.get_profile().stream_type()) << " stream published, message address: " << std::hex << msg_address);
if (fillROSImageMsgAndReturnStatus(image_cv_matrix, stream, width, height, stream_format, t, img_msg_ptr.get()))
{

// Transfer the unique pointer ownership to the RMW
sensor_msgs::msg::Image *msg_address = img_msg_ptr.get();
image_publisher->publish(std::move(img_msg_ptr));

ROS_DEBUG_STREAM(rs2_stream_to_string(f.get_profile().stream_type()) << " stream published, message address: " << std::hex << msg_address);
}
else
{
ROS_ERROR("Could not fill ROS message. Frame was dropped.");
}
}
}
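A minimal sketch of the UniquePtr publish pattern used above (assuming a plain rclcpp publisher named `pub`; the node's image_publisher wrapper is invoked the same way with a moved unique_ptr):

    auto msg = std::make_unique<sensor_msgs::msg::Image>();
    // ... fill *msg ...
    pub->publish(std::move(msg));   // ownership moves to the middleware, which can hand the
                                    // message to intra-process subscribers without a copy;
                                    // msg is null after the move and must not be used again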

@@ -977,22 +1051,36 @@ void BaseRealSenseNode::publishFrame(
}


void BaseRealSenseNode::publishRGBD(const cv::Mat& rgb_cv_matrix, const cv::Mat& depth_cv_matrix, const rclcpp::Time& t)
void BaseRealSenseNode::publishRGBD(
const cv::Mat& rgb_cv_matrix,
const rs2_format& color_format,
const cv::Mat& depth_cv_matrix,
const rs2_format& depth_format,
const rclcpp::Time& t)
{
if (_rgbd_publisher && 0 != _rgbd_publisher->get_subscription_count())
{
ROS_DEBUG_STREAM("Publishing RGBD message");
unsigned int rgb_width = rgb_cv_matrix.size().width;
unsigned int rgb_height = rgb_cv_matrix.size().height;
unsigned int rgb_bpp = rgb_cv_matrix.elemSize();
unsigned int depth_width = depth_cv_matrix.size().width;
unsigned int depth_height = depth_cv_matrix.size().height;
unsigned int depth_bpp = depth_cv_matrix.elemSize();

realsense2_camera_msgs::msg::RGBD::UniquePtr msg(new realsense2_camera_msgs::msg::RGBD());

fillMessageImage(rgb_cv_matrix, COLOR, rgb_width, rgb_height, rgb_bpp, t, &msg->rgb);
fillMessageImage(depth_cv_matrix, DEPTH, depth_width, depth_height, depth_bpp, t, &msg->depth);
bool rgb_message_filled = fillROSImageMsgAndReturnStatus(rgb_cv_matrix, COLOR, rgb_width, rgb_height, color_format, t, &msg->rgb);
if(!rgb_message_filled)
{
ROS_ERROR_STREAM("Failed to fill rgb message inside RGBD message");
return;
}

bool depth_messages_filled = fillROSImageMsgAndReturnStatus(depth_cv_matrix, DEPTH, depth_width, depth_height, depth_format, t, &msg->depth);
if(!depth_messages_filled)
{
ROS_ERROR_STREAM("Failed to fill depth message inside RGBD message");
return;
}

msg->header.frame_id = "camera_rgbd_optical_frame";
msg->header.stamp = t;