diff --git a/CMakeLists.txt b/CMakeLists.txt
index c7ff85681..d37e1c9ba 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -160,6 +160,7 @@ add_library(${TARGET_CORE_NAME}
     src/pipeline/node/IMU.cpp
     src/pipeline/node/EdgeDetector.cpp
     src/pipeline/node/SPIIn.cpp
+    src/pipeline/node/FeatureTracker.cpp
     src/pipeline/datatype/Buffer.cpp
     src/pipeline/datatype/ImgFrame.cpp
     src/pipeline/datatype/ImageManipConfig.cpp
@@ -175,6 +176,8 @@ add_library(${TARGET_CORE_NAME}
     src/pipeline/datatype/IMUData.cpp
     src/pipeline/datatype/StereoDepthConfig.cpp
     src/pipeline/datatype/EdgeDetectorConfig.cpp
+    src/pipeline/datatype/TrackedFeatures.cpp
+    src/pipeline/datatype/FeatureTrackerConfig.cpp
     src/utility/Initialization.cpp
     src/utility/Resources.cpp
     src/xlink/XLinkConnection.cpp
diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
index e60e77c6c..ff1437379 100644
--- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake
+++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
@@ -2,7 +2,7 @@
 set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot")
 
 # "full commit hash of device side binary"
-set(DEPTHAI_DEVICE_SIDE_COMMIT "5a4001ab6618e113eb14ef5020a4773dce48e981")
+set(DEPTHAI_DEVICE_SIDE_COMMIT "a858d35dbe1574fc02e590ae04524ade7b5bf0ba")
 
 # "version if applicable"
 set(DEPTHAI_DEVICE_SIDE_VERSION "")
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 8f6664417..c977e61e9 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -271,6 +271,10 @@ dai_add_example(bootloader_version src/bootloader_version.cpp ON)
 #dai_add_example(flash_bootloader src/flash_bootloader.cpp OFF)
 dai_add_example(edge_detector src/edge_detector.cpp ON)
 
+dai_add_example(feature_tracker src/feature_tracker.cpp ON)
+
+dai_add_example(corner_detector src/corner_detector.cpp ON)
+
 # Calibration Read and write samples
 dai_add_example(calibration_flash src/calibration_flash.cpp OFF)
 dai_add_example(calibration_flash_version5 src/calibration_flash_v5.cpp OFF)
diff --git a/examples/src/corner_detector.cpp b/examples/src/corner_detector.cpp
new file mode 100644
index 000000000..44921f7de
--- /dev/null
+++ b/examples/src/corner_detector.cpp
@@ -0,0 +1,118 @@
+#include <iostream>
+
+// Includes common necessary includes for development using depthai library
+#include "depthai/depthai.hpp"
+#include "deque"
+#include "unordered_map"
+#include "unordered_set"
+
+static void drawFeatures(cv::Mat& frame, std::vector<dai::TrackedFeature>& features) {
+    static const auto pointColor = cv::Scalar(0, 0, 255);
+    static const int circleRadius = 2;
+    for(auto& feature : features) {
+        cv::circle(frame, cv::Point(feature.position.x, feature.position.y), circleRadius, pointColor, -1, cv::LINE_AA, 0);
+    }
+}
+
+int main() {
+    using namespace std;
+
+    // Create pipeline
+    dai::Pipeline pipeline;
+
+    // Define sources and outputs
+    auto monoLeft = pipeline.create<dai::node::MonoCamera>();
+    auto monoRight = pipeline.create<dai::node::MonoCamera>();
+    auto featureTrackerLeft = pipeline.create<dai::node::FeatureTracker>();
+    auto featureTrackerRight = pipeline.create<dai::node::FeatureTracker>();
+
+    auto xoutPassthroughFrameLeft = pipeline.create<dai::node::XLinkOut>();
+    auto xoutTrackedFeaturesLeft = pipeline.create<dai::node::XLinkOut>();
+    auto xoutPassthroughFrameRight = pipeline.create<dai::node::XLinkOut>();
+    auto xoutTrackedFeaturesRight = pipeline.create<dai::node::XLinkOut>();
+    auto xinTrackedFeaturesConfig = pipeline.create<dai::node::XLinkIn>();
+
+    xoutPassthroughFrameLeft->setStreamName("passthroughFrameLeft");
+    xoutTrackedFeaturesLeft->setStreamName("trackedFeaturesLeft");
+    xoutPassthroughFrameRight->setStreamName("passthroughFrameRight");
+    xoutTrackedFeaturesRight->setStreamName("trackedFeaturesRight");
+    xinTrackedFeaturesConfig->setStreamName("trackedFeaturesConfig");
+
+    // Properties
+    monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
+    monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT);
+    monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
+    monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT);
+
+    // Disable optical flow
+    featureTrackerLeft->initialConfig.setMotionEstimator(false);
+    featureTrackerRight->initialConfig.setMotionEstimator(false);
+
+    // Linking
+    monoLeft->out.link(featureTrackerLeft->inputImage);
+    featureTrackerLeft->passthroughInputImage.link(xoutPassthroughFrameLeft->input);
+    featureTrackerLeft->outputFeatures.link(xoutTrackedFeaturesLeft->input);
+    xinTrackedFeaturesConfig->out.link(featureTrackerLeft->inputConfig);
+
+    monoRight->out.link(featureTrackerRight->inputImage);
+    featureTrackerRight->passthroughInputImage.link(xoutPassthroughFrameRight->input);
+    featureTrackerRight->outputFeatures.link(xoutTrackedFeaturesRight->input);
+    xinTrackedFeaturesConfig->out.link(featureTrackerRight->inputConfig);
+
+    auto featureTrackerConfig = featureTrackerRight->initialConfig.get();
+
+    printf("Press 's' to switch between Harris and Shi-Thomasi corner detector! \n");
+
+    // Connect to device and start pipeline
+    dai::Device device(pipeline);
+
+    // Output queues used to receive the results
+    auto passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, false);
+    auto outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, false);
+    auto passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, false);
+    auto outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, false);
+
+    auto inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig");
+
+    const auto leftWindowName = "left";
+    const auto rightWindowName = "right";
+
+    while(true) {
+        auto inPassthroughFrameLeft = passthroughImageLeftQueue->get<dai::ImgFrame>();
+        cv::Mat passthroughFrameLeft = inPassthroughFrameLeft->getFrame();
+        cv::Mat leftFrame;
+        cv::cvtColor(passthroughFrameLeft, leftFrame, cv::COLOR_GRAY2BGR);
+
+        auto inPassthroughFrameRight = passthroughImageRightQueue->get<dai::ImgFrame>();
+        cv::Mat passthroughFrameRight = inPassthroughFrameRight->getFrame();
+        cv::Mat rightFrame;
+        cv::cvtColor(passthroughFrameRight, rightFrame, cv::COLOR_GRAY2BGR);
+
+        auto trackedFeaturesLeft = outputFeaturesLeftQueue->get<dai::TrackedFeatures>()->trackedFeatures;
+        drawFeatures(leftFrame, trackedFeaturesLeft);
+
+        auto trackedFeaturesRight = outputFeaturesRightQueue->get<dai::TrackedFeatures>()->trackedFeatures;
+        drawFeatures(rightFrame, trackedFeaturesRight);
+
+        // Show the frame
+        cv::imshow(leftWindowName, leftFrame);
+        cv::imshow(rightWindowName, rightFrame);
+
+        int key = cv::waitKey(1);
+        if(key == 'q') {
+            break;
+        } else if(key == 's') {
+            if(featureTrackerConfig.cornerDetector.type == dai::FeatureTrackerConfig::CornerDetector::Type::HARRIS) {
+                featureTrackerConfig.cornerDetector.type = dai::FeatureTrackerConfig::CornerDetector::Type::SHI_THOMASI;
+                printf("Switching to Shi-Thomasi \n");
+            } else {
+                featureTrackerConfig.cornerDetector.type = dai::FeatureTrackerConfig::CornerDetector::Type::HARRIS;
+                printf("Switching to Harris \n");
+            }
+            auto cfg = dai::FeatureTrackerConfig();
+            cfg.set(featureTrackerConfig);
+            inputFeatureTrackerConfigQueue->send(cfg);
+        }
+    }
+    return 0;
+}
diff --git a/examples/src/feature_tracker.cpp b/examples/src/feature_tracker.cpp
new file mode 100644
index 000000000..c12e5ce3a
--- /dev/null
+++ b/examples/src/feature_tracker.cpp
@@ -0,0 +1,194 @@
+#include <iostream>
+
+// Includes common necessary includes for development using depthai library
+#include "depthai/depthai.hpp"
+#include "deque"
+#include "unordered_map"
+#include "unordered_set"
+
+static const auto lineColor = cv::Scalar(200, 0, 200);
+static const auto pointColor = cv::Scalar(0, 0, 255);
+
+class FeatureTrackerDrawer {
+   private:
+    static const int circleRadius = 2;
+    static const int maxTrackedFeaturesPathLength = 30;
+    // for how many frames the feature is tracked
+    static int trackedFeaturesPathLength;
+
+    using featureIdType = decltype(dai::Point2f::x);
+
+    std::unordered_set<featureIdType> trackedIDs;
+    std::unordered_map<featureIdType, std::deque<dai::Point2f>> trackedFeaturesPath;
+
+    std::string trackbarName;
+    std::string windowName;
+
+   public:
+    void trackFeaturePath(std::vector<dai::TrackedFeature>& features) {
+        std::unordered_set<featureIdType> newTrackedIDs;
+        for(auto& currentFeature : features) {
+            auto currentID = currentFeature.id;
+            newTrackedIDs.insert(currentID);
+
+            if(!trackedFeaturesPath.count(currentID)) {
+                trackedFeaturesPath.insert({currentID, std::deque<dai::Point2f>()});
+            }
+            std::deque<dai::Point2f>& path = trackedFeaturesPath.at(currentID);
+
+            path.push_back(currentFeature.position);
+            while(path.size() > std::max<unsigned int>(1, trackedFeaturesPathLength)) {
+                path.pop_front();
+            }
+        }
+
+        std::unordered_set<featureIdType> featuresToRemove;
+        for(auto& oldId : trackedIDs) {
+            if(!newTrackedIDs.count(oldId)) {
+                featuresToRemove.insert(oldId);
+            }
+        }
+
+        for(auto& id : featuresToRemove) {
+            trackedFeaturesPath.erase(id);
+        }
+
+        trackedIDs = newTrackedIDs;
+    }
+
+    void drawFeatures(cv::Mat& img) {
+        cv::setTrackbarPos(trackbarName.c_str(), windowName.c_str(), trackedFeaturesPathLength);
+
+        for(auto& featurePath : trackedFeaturesPath) {
+            std::deque<dai::Point2f>& path = featurePath.second;
+            int j = 0;
+            for(j = 0; j < path.size() - 1; j++) {
+                auto src = cv::Point(path[j].x, path[j].y);
+                auto dst = cv::Point(path[j + 1].x, path[j + 1].y);
+                cv::line(img, src, dst, lineColor, 1, cv::LINE_AA, 0);
+            }
+
+            cv::circle(img, cv::Point(path[j].x, path[j].y), circleRadius, pointColor, -1, cv::LINE_AA, 0);
+        }
+    }
+
+    FeatureTrackerDrawer(std::string trackbarName, std::string windowName) {
+        this->trackbarName = trackbarName;
+        this->windowName = windowName;
+        cv::namedWindow(windowName.c_str());
+        cv::createTrackbar(trackbarName.c_str(), windowName.c_str(), &trackedFeaturesPathLength, maxTrackedFeaturesPathLength, nullptr);
+    }
+};
+
+int FeatureTrackerDrawer::trackedFeaturesPathLength = 10;
+
+int main() {
+    using namespace std;
+
+    // Create pipeline
+    dai::Pipeline pipeline;
+
+    // Define sources and outputs
+    auto monoLeft = pipeline.create<dai::node::MonoCamera>();
+    auto monoRight = pipeline.create<dai::node::MonoCamera>();
+    auto featureTrackerLeft = pipeline.create<dai::node::FeatureTracker>();
+    auto featureTrackerRight = pipeline.create<dai::node::FeatureTracker>();
+
+    auto xoutPassthroughFrameLeft = pipeline.create<dai::node::XLinkOut>();
+    auto xoutTrackedFeaturesLeft = pipeline.create<dai::node::XLinkOut>();
+    auto xoutPassthroughFrameRight = pipeline.create<dai::node::XLinkOut>();
+    auto xoutTrackedFeaturesRight = pipeline.create<dai::node::XLinkOut>();
+    auto xinTrackedFeaturesConfig = pipeline.create<dai::node::XLinkIn>();
+
+    xoutPassthroughFrameLeft->setStreamName("passthroughFrameLeft");
+    xoutTrackedFeaturesLeft->setStreamName("trackedFeaturesLeft");
+    xoutPassthroughFrameRight->setStreamName("passthroughFrameRight");
+    xoutTrackedFeaturesRight->setStreamName("trackedFeaturesRight");
+    xinTrackedFeaturesConfig->setStreamName("trackedFeaturesConfig");
+
+    // Properties
+    monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
+    monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT);
+    monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P);
+    monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT);
+
+    // Linking
+    monoLeft->out.link(featureTrackerLeft->inputImage);
+    featureTrackerLeft->passthroughInputImage.link(xoutPassthroughFrameLeft->input);
+    featureTrackerLeft->outputFeatures.link(xoutTrackedFeaturesLeft->input);
+    xinTrackedFeaturesConfig->out.link(featureTrackerLeft->inputConfig);
+
+    monoRight->out.link(featureTrackerRight->inputImage);
+    featureTrackerRight->passthroughInputImage.link(xoutPassthroughFrameRight->input);
+    featureTrackerRight->outputFeatures.link(xoutTrackedFeaturesRight->input);
+    xinTrackedFeaturesConfig->out.link(featureTrackerRight->inputConfig);
+
+    // By default the least amount of resources are allocated
+    // increasing it improves performance when optical flow is enabled
+    auto numShaves = 2;
+    auto numMemorySlices = 2;
+    featureTrackerLeft->setHardwareResources(numShaves, numMemorySlices);
+    featureTrackerRight->setHardwareResources(numShaves, numMemorySlices);
+
+    auto featureTrackerConfig = featureTrackerRight->initialConfig.get();
+
+    printf("Press 's' to switch between Lucas-Kanade optical flow and hardware accelerated motion estimation! \n");
+
+    // Connect to device and start pipeline
+    dai::Device device(pipeline);
+
+    // Output queues used to receive the results
+    auto passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, false);
+    auto outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, false);
+    auto passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, false);
+    auto outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, false);
+
+    auto inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig");
+
+    const auto leftWindowName = "left";
+    auto leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName);
+
+    const auto rightWindowName = "right";
+    auto rightFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", rightWindowName);
+
+    while(true) {
+        auto inPassthroughFrameLeft = passthroughImageLeftQueue->get<dai::ImgFrame>();
+        cv::Mat passthroughFrameLeft = inPassthroughFrameLeft->getFrame();
+        cv::Mat leftFrame;
+        cv::cvtColor(passthroughFrameLeft, leftFrame, cv::COLOR_GRAY2BGR);
+
+        auto inPassthroughFrameRight = passthroughImageRightQueue->get<dai::ImgFrame>();
+        cv::Mat passthroughFrameRight = inPassthroughFrameRight->getFrame();
+        cv::Mat rightFrame;
+        cv::cvtColor(passthroughFrameRight, rightFrame, cv::COLOR_GRAY2BGR);
+
+        auto trackedFeaturesLeft = outputFeaturesLeftQueue->get<dai::TrackedFeatures>()->trackedFeatures;
+        leftFeatureDrawer.trackFeaturePath(trackedFeaturesLeft);
+        leftFeatureDrawer.drawFeatures(leftFrame);
+
+        auto trackedFeaturesRight = outputFeaturesRightQueue->get<dai::TrackedFeatures>()->trackedFeatures;
+        rightFeatureDrawer.trackFeaturePath(trackedFeaturesRight);
+        rightFeatureDrawer.drawFeatures(rightFrame);
+
+        // Show the frame
+        cv::imshow(leftWindowName, leftFrame);
+        cv::imshow(rightWindowName, rightFrame);
+
+        int key = cv::waitKey(1);
+        if(key == 'q') {
+            break;
+        } else if(key == 's') {
+            if(featureTrackerConfig.motionEstimator.type == dai::FeatureTrackerConfig::MotionEstimator::Type::LUCAS_KANADE_OPTICAL_FLOW) {
+                featureTrackerConfig.motionEstimator.type = dai::FeatureTrackerConfig::MotionEstimator::Type::HW_MOTION_ESTIMATION;
+                printf("Switching to hardware accelerated motion estimation \n");
+            } else {
+                featureTrackerConfig.motionEstimator.type = dai::FeatureTrackerConfig::MotionEstimator::Type::LUCAS_KANADE_OPTICAL_FLOW;
+                printf("Switching to Lucas-Kanade optical flow \n");
+            }
+            auto cfg = dai::FeatureTrackerConfig();
+            cfg.set(featureTrackerConfig);
+            inputFeatureTrackerConfigQueue->send(cfg);
+        }
+    }
+    return 0;
+}
diff --git a/include/depthai/pipeline/datatype/FeatureTrackerConfig.hpp b/include/depthai/pipeline/datatype/FeatureTrackerConfig.hpp
new file mode 100644
index 000000000..eb35ed0a5
--- /dev/null
+++ b/include/depthai/pipeline/datatype/FeatureTrackerConfig.hpp
@@ -0,0 +1,103 @@
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "depthai-shared/datatype/RawFeatureTrackerConfig.hpp"
+#include "depthai/pipeline/datatype/Buffer.hpp"
+
+namespace dai {
+
+/**
+ * FeatureTrackerConfig message. Carries config for feature tracking algorithm
+ */
+class FeatureTrackerConfig : public Buffer {
+    std::shared_ptr<RawBuffer> serialize() const override;
+    RawFeatureTrackerConfig& cfg;
+
+   public:
+    // Raw* mirror
+    using CornerDetector = RawFeatureTrackerConfig::CornerDetector;
+    using MotionEstimator = RawFeatureTrackerConfig::MotionEstimator;
+    using FeatureMaintainer = RawFeatureTrackerConfig::FeatureMaintainer;
+
+    /**
+     * Construct FeatureTrackerConfig message.
+     */
+    FeatureTrackerConfig();
+    explicit FeatureTrackerConfig(std::shared_ptr<RawFeatureTrackerConfig> ptr);
+    virtual ~FeatureTrackerConfig() = default;
+
+    /**
+     * Set corner detector algorithm type.
+     * @param cornerDetector Corner detector type, HARRIS or SHI_THOMASI
+     */
+    void setCornerDetector(dai::FeatureTrackerConfig::CornerDetector::Type cornerDetector);
+
+    /**
+     * Set corner detector full configuration.
+     * @param config Corner detector configuration
+     */
+    void setCornerDetector(dai::FeatureTrackerConfig::CornerDetector config);
+
+    /**
+     * Set optical flow as motion estimation algorithm type.
+     */
+    void setOpticalFlow();
+
+    /**
+     * Set optical flow full configuration.
+     * @param config Optical flow configuration
+     */
+    void setOpticalFlow(dai::FeatureTrackerConfig::MotionEstimator::OpticalFlow config);
+
+    /**
+     * Set hardware accelerated motion estimation using block matching.
+     * Faster than optical flow (software implementation) but might not be as accurate.
+     */
+    void setHwMotionEstimation();
+
+    /**
+     * Set number of target features to detect.
+     * @param numTargetFeatures Number of features
+     */
+    void setNumTargetFeatures(std::int32_t numTargetFeatures);
+
+    /**
+     * Enable or disable motion estimator.
+     * @param enable
+     */
+    void setMotionEstimator(bool enable);
+
+    /**
+     * Set motion estimator full configuration.
+     * @param config Motion estimator configuration
+     */
+    void setMotionEstimator(dai::FeatureTrackerConfig::MotionEstimator config);
+
+    /**
+     * Enable or disable feature maintainer.
+     * @param enable
+     */
+    void setFeatureMaintainer(bool enable);
+
+    /**
+     * Set feature maintainer full configuration.
+     * @param config feature maintainer configuration
+     */
+    void setFeatureMaintainer(dai::FeatureTrackerConfig::FeatureMaintainer config);
+
+    /**
+     * Set explicit configuration.
+     * @param config Explicit configuration
+     */
+    void set(dai::RawFeatureTrackerConfig config);
+
+    /**
+     * Retrieve configuration data for FeatureTracker.
+     * @returns config for feature tracking algorithm
+     */
+    dai::RawFeatureTrackerConfig get() const;
+};
+
+}  // namespace dai
diff --git a/include/depthai/pipeline/datatype/TrackedFeatures.hpp b/include/depthai/pipeline/datatype/TrackedFeatures.hpp
new file mode 100644
index 000000000..a034ed275
--- /dev/null
+++ b/include/depthai/pipeline/datatype/TrackedFeatures.hpp
@@ -0,0 +1,29 @@
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "depthai-shared/datatype/RawTrackedFeatures.hpp"
+#include "depthai/pipeline/datatype/Buffer.hpp"
+
+namespace dai {
+
+/**
+ * TrackedFeatures message. Carries position (X, Y) of tracked features and their ID.
+ */
+class TrackedFeatures : public Buffer {
+    std::shared_ptr<RawBuffer> serialize() const override;
+    RawTrackedFeatures& rawdata;
+
+   public:
+    /**
+     * Construct TrackedFeatures message.
+     */
+    TrackedFeatures();
+    explicit TrackedFeatures(std::shared_ptr<RawTrackedFeatures> ptr);
+    virtual ~TrackedFeatures() = default;
+
+    std::vector<TrackedFeature>& trackedFeatures;
+};
+
+}  // namespace dai
diff --git a/include/depthai/pipeline/datatypes.hpp b/include/depthai/pipeline/datatypes.hpp
index 7ffb0ce0e..c8a0561b6 100644
--- a/include/depthai/pipeline/datatypes.hpp
+++ b/include/depthai/pipeline/datatypes.hpp
@@ -4,6 +4,7 @@
 #include "datatype/ADatatype.hpp"
 #include "datatype/Buffer.hpp"
 #include "datatype/CameraControl.hpp"
+#include "datatype/FeatureTrackerConfig.hpp"
 #include "datatype/IMUData.hpp"
 #include "datatype/ImageManipConfig.hpp"
 #include "datatype/ImgDetections.hpp"
@@ -13,4 +14,5 @@
 #include "datatype/SpatialLocationCalculatorConfig.hpp"
 #include "datatype/SpatialLocationCalculatorData.hpp"
 #include "datatype/SystemInformation.hpp"
-#include "datatype/Tracklets.hpp"
+#include "datatype/TrackedFeatures.hpp"
+#include "datatype/Tracklets.hpp"
\ No newline at end of file
diff --git a/include/depthai/pipeline/node/EdgeDetector.hpp b/include/depthai/pipeline/node/EdgeDetector.hpp
index b3aec25cc..3e2ea959a 100644
--- a/include/depthai/pipeline/node/EdgeDetector.hpp
+++ b/include/depthai/pipeline/node/EdgeDetector.hpp
@@ -52,6 +52,11 @@ class EdgeDetector : public Node {
      */
     Output outputImage{*this, "outputImage", Output::Type::MSender, {{DatatypeEnum::ImgFrame, false}}};
 
+    /**
+     * Passthrough message on which the calculation was performed.
+     */
+    Output passthroughInputImage{*this, "passthroughInputImage", Output::Type::MSender, {{DatatypeEnum::ImgFrame, false}}};
+
     // Functions to set properties
     /**
      * Specify whether or not wait until configuration message arrives to inputConfig Input.
diff --git a/include/depthai/pipeline/node/FeatureTracker.hpp b/include/depthai/pipeline/node/FeatureTracker.hpp
new file mode 100644
index 000000000..9da286b5d
--- /dev/null
+++ b/include/depthai/pipeline/node/FeatureTracker.hpp
@@ -0,0 +1,79 @@
+#pragma once
+
+#include <depthai/pipeline/Node.hpp>
+
+// standard
+#include <fstream>
+
+// shared
+#include <depthai-shared/properties/FeatureTrackerProperties.hpp>
+
+#include "depthai/pipeline/datatype/FeatureTrackerConfig.hpp"
+
+namespace dai {
+namespace node {
+
+/**
+ * @brief FeatureTracker node.
+ * Performs feature tracking and reidentification using motion estimation between 2 consecutive frames.
+ */
+class FeatureTracker : public Node {
+   public:
+    using Properties = dai::FeatureTrackerProperties;
+
+   private:
+    std::string getName() const override;
+    nlohmann::json getProperties() override;
+    std::shared_ptr<Node> clone() override;
+
+    std::shared_ptr<RawFeatureTrackerConfig> rawConfig;
+    Properties properties;
+
+   public:
+    FeatureTracker(const std::shared_ptr<PipelineImpl>& par, int64_t nodeId);
+
+    /**
+     * Initial config to use for feature tracking.
+     */
+    FeatureTrackerConfig initialConfig;
+
+    /**
+     * Input FeatureTrackerConfig message with ability to modify parameters in runtime.
+     * Default queue is non-blocking with size 4.
+     */
+    Input inputConfig{*this, "inputConfig", Input::Type::SReceiver, false, 4, {{DatatypeEnum::FeatureTrackerConfig, false}}};
+    /**
+     * Input message with frame data on which feature tracking is performed.
+     * Default queue is non-blocking with size 4.
+     */
+    Input inputImage{*this, "inputImage", Input::Type::SReceiver, false, 4, {{DatatypeEnum::ImgFrame, false}}};
+
+    /**
+     * Outputs TrackedFeatures message that carries tracked features results.
+     */
+    Output outputFeatures{*this, "outputFeatures", Output::Type::MSender, {{DatatypeEnum::TrackedFeatures, false}}};
+
+    /**
+     * Passthrough message on which the calculation was performed.
+     * Suitable for when input queue is set to non-blocking behavior.
+     */
+    Output passthroughInputImage{*this, "passthroughInputImage", Output::Type::MSender, {{DatatypeEnum::ImgFrame, false}}};
+
+    // Functions to set properties
+    /**
+     * Specify whether or not wait until configuration message arrives to inputConfig Input.
+     * @param wait True to wait for configuration message, false otherwise.
+     */
+    void setWaitForConfigInput(bool wait);
+
+    /**
+     * Specify allocated hardware resources for feature tracking.
+     * 2 shaves/memory slices are required for optical flow, 1 for corner detection only.
+     * @param numShaves Number of shaves. Maximum 2.
+     * @param numMemorySlices Number of memory slices. Maximum 2.
+     */
+    void setHardwareResources(int numShaves, int numMemorySlices);
+};
+
+}  // namespace node
+}  // namespace dai
diff --git a/include/depthai/pipeline/nodes.hpp b/include/depthai/pipeline/nodes.hpp
index 8c4f3b013..10b96e1a1 100644
--- a/include/depthai/pipeline/nodes.hpp
+++ b/include/depthai/pipeline/nodes.hpp
@@ -4,6 +4,7 @@
 #include "node/ColorCamera.hpp"
 #include "node/DetectionNetwork.hpp"
 #include "node/EdgeDetector.hpp"
+#include "node/FeatureTracker.hpp"
 #include "node/IMU.hpp"
 #include "node/ImageManip.hpp"
 #include "node/MonoCamera.hpp"
diff --git a/shared/depthai-shared b/shared/depthai-shared
index cb3164a3a..a6375bdb7 160000
--- a/shared/depthai-shared
+++ b/shared/depthai-shared
@@ -1 +1 @@
-Subproject commit cb3164a3a41e013cd8841eb90e4898276c3517fc
+Subproject commit a6375bdb77b0b9c76b12d8202a6a914d3f74b05f
diff --git a/src/pipeline/datatype/FeatureTrackerConfig.cpp b/src/pipeline/datatype/FeatureTrackerConfig.cpp
new file mode 100644
index 000000000..ab2a76af3
--- /dev/null
+++ b/src/pipeline/datatype/FeatureTrackerConfig.cpp
@@ -0,0 +1,65 @@
+#include "depthai/pipeline/datatype/FeatureTrackerConfig.hpp"
+
+namespace dai {
+
+std::shared_ptr<RawBuffer> FeatureTrackerConfig::serialize() const {
+    return raw;
+}
+
+FeatureTrackerConfig::FeatureTrackerConfig() : Buffer(std::make_shared<RawFeatureTrackerConfig>()), cfg(*dynamic_cast<RawFeatureTrackerConfig*>(raw.get())) {}
+FeatureTrackerConfig::FeatureTrackerConfig(std::shared_ptr<RawFeatureTrackerConfig> ptr)
+    : Buffer(std::move(ptr)), cfg(*dynamic_cast<RawFeatureTrackerConfig*>(raw.get())) {}
+
+dai::RawFeatureTrackerConfig FeatureTrackerConfig::get() const {
+    return cfg;
+}
+
+void FeatureTrackerConfig::setCornerDetector(dai::FeatureTrackerConfig::CornerDetector::Type cornerDetector) {
+    cfg.cornerDetector.type = cornerDetector;
+}
+
+void FeatureTrackerConfig::setCornerDetector(dai::FeatureTrackerConfig::CornerDetector config) {
+    cfg.cornerDetector = config;
+}
+
+void FeatureTrackerConfig::setMotionEstimator(bool enable) {
+    cfg.motionEstimator.enable = enable;
+}
+
+void FeatureTrackerConfig::setMotionEstimator(dai::FeatureTrackerConfig::MotionEstimator config) {
+    cfg.motionEstimator = config;
+}
+
+void FeatureTrackerConfig::setOpticalFlow() {
+    cfg.motionEstimator.type = dai::FeatureTrackerConfig::MotionEstimator::Type::LUCAS_KANADE_OPTICAL_FLOW;
+    setMotionEstimator(true);
+}
+
+void FeatureTrackerConfig::setOpticalFlow(dai::FeatureTrackerConfig::MotionEstimator::OpticalFlow config) {
+    cfg.motionEstimator.type = dai::FeatureTrackerConfig::MotionEstimator::Type::LUCAS_KANADE_OPTICAL_FLOW;
+    cfg.motionEstimator.opticalFlow = config;
+    setMotionEstimator(true);
+}
+
+void FeatureTrackerConfig::setHwMotionEstimation() {
+    cfg.motionEstimator.type = dai::FeatureTrackerConfig::MotionEstimator::Type::HW_MOTION_ESTIMATION;
+    setMotionEstimator(true);
+}
+
+void FeatureTrackerConfig::setFeatureMaintainer(bool enable) {
+    cfg.featureMaintainer.enable = enable;
+}
+
+void FeatureTrackerConfig::setFeatureMaintainer(dai::FeatureTrackerConfig::FeatureMaintainer config) {
+    cfg.featureMaintainer = config;
+}
+
+void FeatureTrackerConfig::set(dai::RawFeatureTrackerConfig config) {
+    cfg = config;
+}
+
+void FeatureTrackerConfig::setNumTargetFeatures(std::int32_t numTargetFeatures) {
+    cfg.cornerDetector.numTargetFeatures = numTargetFeatures;
+}
+
+}  // namespace dai
diff --git a/src/pipeline/datatype/StreamPacketParser.cpp b/src/pipeline/datatype/StreamPacketParser.cpp
index a4ba4f45c..a6c3a6395 100644
--- a/src/pipeline/datatype/StreamPacketParser.cpp
+++ b/src/pipeline/datatype/StreamPacketParser.cpp
@@ -15,6 +15,7 @@
 #include "depthai/pipeline/datatype/Buffer.hpp"
 #include "depthai/pipeline/datatype/CameraControl.hpp"
 #include "depthai/pipeline/datatype/EdgeDetectorConfig.hpp"
+#include "depthai/pipeline/datatype/FeatureTrackerConfig.hpp"
 #include "depthai/pipeline/datatype/IMUData.hpp"
 #include "depthai/pipeline/datatype/ImageManipConfig.hpp"
 #include "depthai/pipeline/datatype/ImgDetections.hpp"
@@ -25,6 +26,7 @@
 #include "depthai/pipeline/datatype/SpatialLocationCalculatorData.hpp"
 #include "depthai/pipeline/datatype/StereoDepthConfig.hpp"
 #include "depthai/pipeline/datatype/SystemInformation.hpp"
+#include "depthai/pipeline/datatype/TrackedFeatures.hpp"
 #include "depthai/pipeline/datatype/Tracklets.hpp"
 
 // shared
@@ -32,6 +34,7 @@
 #include "depthai-shared/datatype/RawBuffer.hpp"
 #include "depthai-shared/datatype/RawCameraControl.hpp"
 #include "depthai-shared/datatype/RawEdgeDetectorConfig.hpp"
+#include "depthai-shared/datatype/RawFeatureTrackerConfig.hpp"
 #include "depthai-shared/datatype/RawIMUData.hpp"
 #include "depthai-shared/datatype/RawImageManipConfig.hpp"
 #include "depthai-shared/datatype/RawImgDetections.hpp"
@@ -141,6 +144,14 @@ std::shared_ptr<RawBuffer> parsePacket(streamPacketDesc_t* packet) {
         case DatatypeEnum::EdgeDetectorConfig:
             return parseDatatype<RawEdgeDetectorConfig>(jser, data);
             break;
+
+        case DatatypeEnum::TrackedFeatures:
+            return parseDatatype<RawTrackedFeatures>(jser, data);
+            break;
+
+        case DatatypeEnum::FeatureTrackerConfig:
+            return parseDatatype<RawFeatureTrackerConfig>(jser, data);
+            break;
     }
 
     throw std::runtime_error("Bad packet, couldn't parse");
@@ -223,6 +234,14 @@ std::shared_ptr<ADatatype> parsePacketToADatatype(streamPacketDesc_t* packet) {
         case DatatypeEnum::EdgeDetectorConfig:
             return std::make_shared<EdgeDetectorConfig>(parseDatatype<RawEdgeDetectorConfig>(jser, data));
             break;
+
+        case DatatypeEnum::TrackedFeatures:
+            return std::make_shared<TrackedFeatures>(parseDatatype<RawTrackedFeatures>(jser, data));
+            break;
+
+        case DatatypeEnum::FeatureTrackerConfig:
+            return std::make_shared<FeatureTrackerConfig>(parseDatatype<RawFeatureTrackerConfig>(jser, data));
+            break;
     }
 
     throw std::runtime_error("Bad packet, couldn't parse");
diff --git a/src/pipeline/datatype/TrackedFeatures.cpp b/src/pipeline/datatype/TrackedFeatures.cpp
new file mode 100644
index 000000000..1fa34051a
--- /dev/null
+++ b/src/pipeline/datatype/TrackedFeatures.cpp
@@ -0,0 +1,14 @@
+#include "depthai/pipeline/datatype/TrackedFeatures.hpp"
+
+namespace dai {
+
+std::shared_ptr<RawBuffer> TrackedFeatures::serialize() const {
+    return raw;
+}
+
+TrackedFeatures::TrackedFeatures()
+    : Buffer(std::make_shared<RawTrackedFeatures>()), rawdata(*dynamic_cast<RawTrackedFeatures*>(raw.get())), trackedFeatures(rawdata.trackedFeatures) {}
+TrackedFeatures::TrackedFeatures(std::shared_ptr<RawTrackedFeatures> ptr)
+    : Buffer(std::move(ptr)), rawdata(*dynamic_cast<RawTrackedFeatures*>(raw.get())), trackedFeatures(rawdata.trackedFeatures) {}
+
+}  // namespace dai
diff --git a/src/pipeline/node/EdgeDetector.cpp b/src/pipeline/node/EdgeDetector.cpp
index 2c5c1a7c7..436e7faa8 100644
--- a/src/pipeline/node/EdgeDetector.cpp
+++ b/src/pipeline/node/EdgeDetector.cpp
@@ -8,7 +8,7 @@ namespace node {
 EdgeDetector::EdgeDetector(const std::shared_ptr<PipelineImpl>& par, int64_t nodeId)
     : Node(par, nodeId), rawConfig(std::make_shared<RawEdgeDetectorConfig>()), initialConfig(rawConfig) {
     inputs = {&inputConfig, &inputImage};
-    outputs = {&outputImage};
+    outputs = {&outputImage, &passthroughInputImage};
 }
 
 std::string EdgeDetector::getName() const {
diff --git a/src/pipeline/node/FeatureTracker.cpp b/src/pipeline/node/FeatureTracker.cpp
new file mode 100644
index 000000000..a27c995ae
--- /dev/null
+++ b/src/pipeline/node/FeatureTracker.cpp
@@ -0,0 +1,40 @@
+#include "depthai/pipeline/node/FeatureTracker.hpp"
+
+#include "spdlog/fmt/fmt.h"
+
+namespace dai {
+namespace node {
+
+FeatureTracker::FeatureTracker(const std::shared_ptr<PipelineImpl>& par, int64_t nodeId)
+    : Node(par, nodeId), rawConfig(std::make_shared<RawFeatureTrackerConfig>()), initialConfig(rawConfig) {
+    inputs = {&inputConfig, &inputImage};
+    outputs = {&outputFeatures, &passthroughInputImage};
+}
+
+std::string FeatureTracker::getName() const {
+    return "FeatureTracker";
+}
+
+nlohmann::json FeatureTracker::getProperties() {
+    nlohmann::json j;
+    properties.initialConfig = *rawConfig;
+    nlohmann::to_json(j, properties);
+    return j;
+}
+
+// Node properties configuration
+void FeatureTracker::setWaitForConfigInput(bool wait) {
+    properties.inputConfigSync = wait;
+}
+
+void FeatureTracker::setHardwareResources(int numShaves, int numMemorySlices) {
+    properties.numShaves = numShaves;
+    properties.numMemorySlices = numMemorySlices;
+}
+
+std::shared_ptr<Node> FeatureTracker::clone() {
+    return std::make_shared<std::decay<decltype(*this)>::type>(*this);
+}
+
+}  // namespace node
+}  // namespace dai