From 28179774839751bd454326b5e719d1cc7cb95f49 Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Mon, 13 Jan 2025 10:00:22 +0200 Subject: [PATCH 1/3] [NPU] Use zero tensor to get correct data (#27980) ### Details: - *Can not deallocate and re-allocate a newer memory for tensor if update mutable command list is not supported. Newer memory should be updated in the graph and this can be done only using the updating command list feature* - *In order to check if the memory was or wasn't re-allocated we are checking the unique ID provided by the driver when memory is created* ### Tickets: - *E#134453* --------- Signed-off-by: Bogdan Pereanu --- .../backend/include/zero_infer_request.hpp | 9 +- .../src/backend/include/zero_pipeline.hpp | 20 +- .../backend/include/zero_remote_tensor.hpp | 6 +- .../src/backend/include/zero_tensor.hpp | 66 ++++++ .../src/backend/src/zero_infer_request.cpp | 196 ++++++++++-------- .../src/backend/src/zero_pipeline.cpp | 134 ++++++++---- .../intel_npu/src/backend/src/zero_tensor.cpp | 152 ++++++++++++++ .../intel_npu/common/remote_tensor.hpp | 6 +- .../intel_npu/common/sync_infer_request.hpp | 4 + .../src/common/src/remote_tensor.cpp | 17 +- .../src/common/src/sync_infer_request.cpp | 10 +- .../functional/behavior/infer_request_run.cpp | 6 + .../functional/behavior/infer_request_run.hpp | 71 +++++++ .../overload/ov_infer_request/io_tensor.hpp | 5 + .../behavior/ov_infer_request/io_tensor.cpp | 10 +- .../skip_tests_config.cpp | 7 - 16 files changed, 556 insertions(+), 163 deletions(-) create mode 100644 src/plugins/intel_npu/src/backend/include/zero_tensor.hpp create mode 100644 src/plugins/intel_npu/src/backend/src/zero_tensor.cpp diff --git a/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp b/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp index 1e8781b0afe820..eb103c493e4ef4 100644 --- a/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp +++ 
b/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp @@ -15,6 +15,7 @@ #include "zero_pipeline.hpp" #include "zero_profiling.hpp" #include "zero_remote_tensor.hpp" +#include "zero_tensor.hpp" namespace intel_npu { @@ -62,8 +63,9 @@ class ZeroInferRequest final : public SyncInferRequest { std::shared_ptr& get_level_zero_input(size_t index, size_t tensorNo = 0) const; std::vector>& get_level_zero_inputs(size_t index) const; - std::optional& get_input_tensor_data(size_t index, size_t tensorNo = 0) const; - std::vector>& get_input_tensors_data(size_t index) const; + std::shared_ptr create_tensor(ov::element::Type type, + const ov::Shape& shape, + const ov::Allocator& allocator = {}) const override; const std::shared_ptr _initStructs; const std::shared_ptr _graph; @@ -75,9 +77,6 @@ class ZeroInferRequest final : public SyncInferRequest { mutable std::vector>> _levelZeroInputTensors; mutable std::vector> _levelZeroOutputTensors; - mutable std::vector>> _inputTensorsData; - mutable std::vector> _outputTensorsData; - ze_device_properties_t _properties = {}; std::shared_ptr _inputAllocator; std::shared_ptr _outputAllocator; diff --git a/src/plugins/intel_npu/src/backend/include/zero_pipeline.hpp b/src/plugins/intel_npu/src/backend/include/zero_pipeline.hpp index de5e1ac81c4728..1bc06d174f62c1 100644 --- a/src/plugins/intel_npu/src/backend/include/zero_pipeline.hpp +++ b/src/plugins/intel_npu/src/backend/include/zero_pipeline.hpp @@ -9,25 +9,20 @@ #include "intel_npu/utils/zero/zero_wrappers.hpp" #include "zero_memory.hpp" #include "zero_profiling.hpp" +#include "zero_tensor.hpp" namespace intel_npu { -struct TensorData { - void* mem; - size_t size; - bool levelZeroTensorCreatedLocally = true; -}; - struct Pipeline { public: Pipeline(const Config& config, - const std::shared_ptr& initStructs, + const std::shared_ptr& init_structs, const std::shared_ptr& graph, zeroProfiling::ProfilingPool& profiling_pool, zeroProfiling::ProfilingQuery& profiling_query, const 
std::shared_ptr& npu_profiling, - const std::vector>>& inputTensorsData, - const std::vector>& outputTensorsData, + const std::vector>>& input_tensors, + const std::vector>& output_tensors, uint32_t group_ordinal); Pipeline(const Pipeline&) = delete; @@ -38,8 +33,11 @@ struct Pipeline { void pull(); void reset() const; - void updateCommandList(const TensorData& tensorsData, uint32_t index); - void updateCommandList(const TensorData& tensorsData, uint32_t index, size_t commandListIndex); + void updateCommandList(uint32_t arg_index, const void* arg_data, size_t byte_size); + void updateCommandListIndex(uint32_t arg_index, const void* arg_data, size_t command_list_index); + + void closeCommandList(); + void closeCommandListIndex(size_t command_list_index); protected: std::shared_ptr _graph; diff --git a/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp b/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp index aabe5e9bb4e1bf..e102cb3f8e560e 100644 --- a/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp +++ b/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp @@ -14,7 +14,7 @@ namespace intel_npu { -class ZeroRemoteTensor : public RemoteTensor { +class ZeroRemoteTensor final : public RemoteTensor { public: ZeroRemoteTensor(const std::shared_ptr& context, const std::shared_ptr& init_structs, @@ -48,4 +48,8 @@ class ZeroRemoteTensor : public RemoteTensor { bool _external_memory_support = false; }; +inline bool is_remote_tensor(const std::shared_ptr& tensor) { + return std::dynamic_pointer_cast(tensor) != nullptr; +} + } // namespace intel_npu diff --git a/src/plugins/intel_npu/src/backend/include/zero_tensor.hpp b/src/plugins/intel_npu/src/backend/include/zero_tensor.hpp new file mode 100644 index 00000000000000..a2a39ee301d6fc --- /dev/null +++ b/src/plugins/intel_npu/src/backend/include/zero_tensor.hpp @@ -0,0 +1,66 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + 
+#pragma once + +#include +#include + +#include "intel_npu/config/config.hpp" +#include "intel_npu/utils/zero/zero_init.hpp" +#include "openvino/runtime/common.hpp" +#include "openvino/runtime/itensor.hpp" +#include "openvino/runtime/so_ptr.hpp" + +namespace intel_npu { + +/** + * @brief Constructs Tensor using element type and shape. Allocate internal host storage using custom allocator. + * @details The implementation is similar to the AllocatedTensor class from OV namespace. + * @note Set_shape method throws an error in case re-allocation is needed but this is not supported by the driver. + * There are two extra methods to notify the consumer if memory changed or not and to reset the flag. + */ +class ZeroTensor final : public ov::ITensor { +public: + ZeroTensor(const std::shared_ptr& init_structs, + const ov::element::Type element_type, + const ov::Shape& shape, + const ov::Allocator& allocator); + + void* data(const ov::element::Type& type = {}) const override; + + const ov::element::Type& get_element_type() const override; + + const ov::Shape& get_shape() const override; + + void set_shape(ov::Shape new_shape) override; + + const ov::Strides& get_strides() const override; + + bool memory_address_changed(); + void reset_memory_flag(); + + ~ZeroTensor(); + +private: + static void initialize_elements(void* data, const ov::element::Type& element_type, const ov::Shape& shape); + void update_strides() const; + size_t get_capacity() const; + size_t get_bytes_capacity() const; + void destroy_elements(size_t begin_ind, size_t end_ind); + void destroy_memory(); + + std::shared_ptr _init_structs; + + ov::element::Type _element_type; + ov::Shape _shape; + ov::Shape _capacity; + mutable ov::Strides _strides; + mutable std::once_flag _strides_once; + ov::Allocator _allocator; + void* _ptr = nullptr; + bool _reset_tensor_memory = false; +}; + +} // namespace intel_npu diff --git a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp 
b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp index a0e5d2d11c1fef..c8c7cb8854d53e 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp @@ -12,7 +12,6 @@ #include "intel_npu/utils/zero/zero_api.hpp" #include "openvino/op/util/op_types.hpp" #include "openvino/runtime/intel_npu/remote_properties.hpp" -#include "openvino/runtime/make_tensor.hpp" #include "zero_memory.hpp" using namespace intel_npu; @@ -105,8 +104,6 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& _logger("ZeroInferRequest", config.get()), _levelZeroInputTensors(_metadata.inputs.size(), std::vector>(1, nullptr)), _levelZeroOutputTensors(_metadata.outputs.size(), nullptr), - _inputTensorsData(_metadata.inputs.size(), std::vector>(1, std::nullopt)), - _outputTensorsData(_metadata.outputs.size(), std::nullopt), _profilingPool(_initStructs, _graph, zeroProfiling::POOL_SIZE), _profilingQuery(_initStructs, 0) { _logger.debug("ZeroInferRequest::ZeroInferRequest - SyncInferRequest"); @@ -140,8 +137,6 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& get_level_zero_input(ioIndex) = allocate_tensor(inputDescriptor, ioIndex, INPUT, *_inputAllocator, _graph->get_batch_size()); - get_input_tensor_data(ioIndex) = - TensorData{get_level_zero_input(ioIndex)->data(), get_level_zero_input(ioIndex)->get_byte_size()}; ++ioIndex; } @@ -157,9 +152,6 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& _levelZeroOutputTensors.at(ioIndex) = allocate_tensor(outputDescriptor, ioIndex, OUTPUT, *_outputAllocator, _graph->get_batch_size()); - _outputTensorsData.at(ioIndex) = - std::optional(TensorData{_levelZeroOutputTensors.at(ioIndex)->data(), - _levelZeroOutputTensors.at(ioIndex)->get_byte_size()}); ++ioIndex; } @@ -183,14 +175,13 @@ void ZeroInferRequest::create_pipeline() { continue; } - _logger.debug("ZeroInferRequest::create_pipeline - allocate new tensor"); + 
_logger.debug("ZeroInferRequest::create_pipeline - allocate new input tensor %s", + _metadata.inputs.at(inputIndex).nodeFriendlyName.c_str()); get_level_zero_input(inputIndex) = allocate_tensor(_metadata.inputs.at(inputIndex), inputIndex, INPUT, *_inputAllocator, _graph->get_batch_size()); - get_input_tensor_data(inputIndex) = std::optional( - TensorData{get_level_zero_input(inputIndex)->data(), get_level_zero_input(inputIndex)->get_byte_size()}); } for (size_t outputIndex = 0; outputIndex < _metadata.outputs.size(); ++outputIndex) { @@ -199,15 +190,13 @@ void ZeroInferRequest::create_pipeline() { _metadata.outputs.at(outputIndex).nodeFriendlyName.c_str()); continue; } - _logger.debug("ZeroInferRequest::create_pipeline - allocate new tensor"); + _logger.debug("ZeroInferRequest::create_pipeline - allocate new output tensor %s", + _metadata.outputs.at(outputIndex).nodeFriendlyName.c_str()); _levelZeroOutputTensors.at(outputIndex) = allocate_tensor(_metadata.outputs.at(outputIndex), outputIndex, OUTPUT, *_outputAllocator, _graph->get_batch_size()); - _outputTensorsData.at(outputIndex) = - std::optional(TensorData{_levelZeroOutputTensors.at(outputIndex)->data(), - _levelZeroOutputTensors.at(outputIndex)->get_byte_size()}); } // Find the corresponding command queue group. @@ -224,8 +213,8 @@ void ZeroInferRequest::create_pipeline() { _profilingPool, _profilingQuery, _npuProfiling, - _inputTensorsData, - _outputTensorsData, + _levelZeroInputTensors, + _levelZeroOutputTensors, groupOrdinal); _logger.debug("ZeroInferRequest::create_pipeline - SyncInferRequest completed"); @@ -236,23 +225,15 @@ void ZeroInferRequest::set_tensor_data(const std::shared_ptr& tenso const bool isInput) { OV_ITT_TASK_CHAIN(ZERO_SET_TENSOR, itt::domains::LevelZeroBackend, "set_tensor", "set_tensor_data"); auto& levelZeroTensors = isInput ? get_level_zero_input(index) : _levelZeroOutputTensors.at(index); - auto& tensorsData = isInput ? 
get_input_tensor_data(index) : _outputTensorsData.at(index); - bool setTensorData = false; - bool levelZeroTensorCreatedLocally = true; + const auto& zeroTensor = std::dynamic_pointer_cast(tensor); - OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "check_data_allocation"); - if (memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensor->data())) { - _logger.debug("ZeroInferRequest::set_tensor_data - tensor was created in the same L0 context"); - levelZeroTensors = tensor; - levelZeroTensorCreatedLocally = false; - setTensorData = true; - } - - if (!setTensorData) { - // make sure that the L0 tensor was allocated locally and is not received from the user when receiving - // random tensor - if (tensorsData.has_value() && !tensorsData->levelZeroTensorCreatedLocally) { + if (zeroTensor == nullptr) { + OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "check_data_allocation"); + if (memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensor->data())) { + _logger.debug("ZeroInferRequest::set_tensor_data - tensor was created in the same L0 context"); + levelZeroTensors = tensor; + } else { _logger.debug("ZeroInferRequest::set_tensor_data - create locally L0 tensor"); OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "allocate tensor"); @@ -261,23 +242,19 @@ void ZeroInferRequest::set_tensor_data(const std::shared_ptr& tenso isInput, isInput ? *_inputAllocator : *_outputAllocator, _graph->get_batch_size()); - - setTensorData = true; - levelZeroTensorCreatedLocally = true; } - } - - if (setTensorData) { - tensorsData = std::optional( - TensorData{levelZeroTensors->data(), levelZeroTensors->get_byte_size(), levelZeroTensorCreatedLocally}); if (_pipelineIsCreated) { _logger.debug("ZeroInferRequest::infer_async - update command list"); + OPENVINO_ASSERT(levelZeroTensors->data(), "Empty buffer"); + OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "updateCommandList"); - _pipeline->updateCommandList(*tensorsData, - isInput ? 
_graph->get_input_descriptors().at(index).idx - : _graph->get_output_descriptors().at(index).idx); + _pipeline->updateCommandList(isInput ? _graph->get_input_descriptors().at(index).idx + : _graph->get_output_descriptors().at(index).idx, + levelZeroTensors->data(), + levelZeroTensors->get_byte_size()); + _pipeline->closeCommandList(); } } } @@ -294,23 +271,20 @@ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptrget_properties(), ov::intel_npu::mem_handle); - if (data == nullptr) { - OPENVINO_THROW("Empty buffer"); - } + OPENVINO_ASSERT(data, "Empty buffer"); auto& levelZeroTensors = isInput ? get_level_zero_input(index) : _levelZeroOutputTensors.at(index); - auto& tensorsData = isInput ? get_input_tensor_data(index) : _outputTensorsData.at(index); - levelZeroTensors = tensor; - tensorsData = std::optional(TensorData{data, tensor->get_byte_size(), false}); if (_pipelineIsCreated) { _logger.debug("ZeroInferRequest::infer_async - update command list"); OV_ITT_TASK_NEXT(ZERO_SET_REMOTE_TENSOR, "updateCommandList"); _pipeline->updateCommandList( - *tensorsData, - isInput ? _graph->get_input_descriptors().at(index).idx : _graph->get_output_descriptors().at(index).idx); + isInput ? 
_graph->get_input_descriptors().at(index).idx : _graph->get_output_descriptors().at(index).idx, + data, + tensor->get_byte_size()); + _pipeline->closeCommandList(); } } @@ -333,8 +307,6 @@ void ZeroInferRequest::set_tensor(const ov::Output& port, const } if (is_batched_input(foundPort.idx)) { // resize vector size to 1 if set_tensor is called after set_tensors - get_input_tensors_data(foundPort.idx).resize(1); - get_input_tensors_data(foundPort.idx).shrink_to_fit(); get_level_zero_inputs(foundPort.idx).resize(1); get_level_zero_inputs(foundPort.idx).shrink_to_fit(); get_user_inputs(foundPort.idx).resize(1); @@ -345,6 +317,7 @@ void ZeroInferRequest::set_tensor(const ov::Output& port, const } else { if (_userOutputTensors.at(foundPort.idx)._ptr == tensor._ptr) { // Got set_tensor with the same object here too - do nothing + _logger.debug("ZeroInferRequest::set_tensor - got the same tensor, do nothing"); return; } _userOutputTensors.at(foundPort.idx) = tensor; @@ -382,13 +355,14 @@ void ZeroInferRequest::set_tensors(const ov::Output& port, get_user_inputs(foundPort.idx).resize(tensors.size()); get_user_inputs(foundPort.idx) = tensors; + void* data = nullptr; + if (_initStructs->getMutableCommandListVersion()) { if (_graph->get_batch_size().has_value()) { for (size_t i = 0; i < tensors.size(); i++) { auto remoteTensor = std::dynamic_pointer_cast(tensors[i]._ptr); get_level_zero_inputs(foundPort.idx).resize(tensors.size()); - get_input_tensors_data(foundPort.idx).resize(tensors.size()); if (remoteTensor == nullptr) { bool tensorHasSameL0Context = false; @@ -409,26 +383,22 @@ void ZeroInferRequest::set_tensors(const ov::Output& port, allocate_tensor(_metadata.inputs.at(foundPort.idx), foundPort.idx, true, *_inputAllocator); } - get_input_tensor_data(foundPort.idx, i) = - std::optional(TensorData{get_level_zero_input(foundPort.idx, i)->data(), - get_level_zero_input(foundPort.idx, i)->get_byte_size(), - false}); + data = get_level_zero_input(foundPort.idx, i)->data(); } 
else { _logger.debug("ZeroInferRequest::set_tensors - remote tensor is used"); - get_input_tensor_data(foundPort.idx, i) = std::optional( - TensorData{extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle), - remoteTensor->get_byte_size(), - false}); + data = extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); get_level_zero_input(foundPort.idx, i) = tensors.at(i)._ptr; } if (_pipelineIsCreated) { OV_ITT_TASK_NEXT(SET_TENSORS, "updateCommandList"); - _pipeline->updateCommandList(*get_input_tensor_data(foundPort.idx, i), - _graph->get_input_descriptors().at(foundPort.idx).idx, - i); + + OPENVINO_ASSERT(data, "Empty buffer"); + + _pipeline->updateCommandListIndex(_graph->get_input_descriptors().at(foundPort.idx).idx, data, i); + _pipeline->closeCommandListIndex(i); } } } @@ -455,17 +425,17 @@ ov::SoPtr ZeroInferRequest::get_tensor(const ov::Outputget_batch_size()); - tensorsData = std::optional(TensorData{levelZeroTensors->data(), levelZeroTensors->get_byte_size()}); return levelZeroTensors; } @@ -491,6 +461,67 @@ void ZeroInferRequest::infer_async() { create_pipeline(); _pipelineIsCreated = true; + } else { + if (_initStructs->getMutableCommandListVersion()) { + bool closePipeline = false; + size_t ioIndex = 0; + + for (const auto& levelZeroTensor : _levelZeroInputTensors) { + const auto inputDescriptor = _metadata.inputs.at(ioIndex); + auto zeroTensor = std::dynamic_pointer_cast(levelZeroTensor.at(SINGLE_TENSOR)); + + if (is_batched_input(ioIndex) || inputDescriptor.isShapeTensor || inputDescriptor.isStateInput || + is_remote_tensor(levelZeroTensor.at(SINGLE_TENSOR)) || zeroTensor == nullptr) { + ++ioIndex; + continue; + } + + if (zeroTensor->memory_address_changed()) { + _logger.debug("Update input graph descriptor with the new tensor"); + OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); + + _pipeline->updateCommandList(_graph->get_input_descriptors().at(ioIndex).idx, + zeroTensor->data(), + 
zeroTensor->get_byte_size()); + closePipeline = true; + + zeroTensor->reset_memory_flag(); + } + + ++ioIndex; + } + + ioIndex = 0; + + for (const auto& levelZeroTensor : _levelZeroOutputTensors) { + const auto outputDescriptor = _metadata.outputs.at(ioIndex); + auto zeroTensor = std::dynamic_pointer_cast(levelZeroTensor); + + if (outputDescriptor.isShapeTensor || outputDescriptor.isStateOutput || + is_remote_tensor(levelZeroTensor) || zeroTensor == nullptr) { + ++ioIndex; + continue; + } + + if (zeroTensor->memory_address_changed()) { + _logger.debug("Update output graph descriptor with the new tensor"); + OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); + + _pipeline->updateCommandList(_graph->get_output_descriptors().at(ioIndex).idx, + zeroTensor->data(), + zeroTensor->get_byte_size()); + closePipeline = true; + + zeroTensor->reset_memory_flag(); + } + + ++ioIndex; + } + + if (closePipeline) { + _pipeline->closeCommandList(); + } + } } } @@ -512,9 +543,7 @@ void ZeroInferRequest::infer_async() { if (is_batched_input(inputIndex)) { if (_graph->get_batch_size().has_value()) { for (size_t i = 0; i < userTensor.size(); i++) { - auto levelZeroBatchRemoteTensor = - std::dynamic_pointer_cast(get_level_zero_input(inputIndex, i)); - if (levelZeroBatchRemoteTensor == nullptr) { + if (!is_remote_tensor(get_level_zero_input(inputIndex, i))) { void* levelZeroBuffer = get_level_zero_input(inputIndex, i)->data(); auto userBatchRemoteTensor = std::dynamic_pointer_cast(userTensor.at(i)._ptr); @@ -563,9 +592,8 @@ void ZeroInferRequest::infer_async() { ? 
userTensor.at(SINGLE_TENSOR)->data() : extract_object(userRemoteTensor->get_properties(), ov::intel_npu::mem_handle); - const std::shared_ptr& levelZeroTensor = get_level_zero_input(inputIndex); - auto levelZeroRemoteTensor = std::dynamic_pointer_cast(levelZeroTensor); - if (levelZeroRemoteTensor == nullptr) { + const auto& levelZeroTensor = get_level_zero_input(inputIndex); + if (!is_remote_tensor(levelZeroTensor)) { void* levelZeroBuffer = levelZeroTensor->data(); if (userBuffer != levelZeroBuffer) { @@ -616,8 +644,7 @@ void ZeroInferRequest::get_result() { : extract_object(userRemoteTensor->get_properties(), ov::intel_npu::mem_handle); const std::shared_ptr& levelZeroTensor = _levelZeroOutputTensors.at(outputIndex); - auto levelZeroRemoteTensor = std::dynamic_pointer_cast(levelZeroTensor); - if (levelZeroRemoteTensor == nullptr) { + if (!is_remote_tensor(levelZeroTensor)) { void* levelZeroBuffer = levelZeroTensor->data(); if (userBuffer != levelZeroBuffer) { @@ -703,6 +730,14 @@ std::vector ZeroInferRequest::get_profiling_info() const { } } +std::shared_ptr ZeroInferRequest::create_tensor(ov::element::Type type, + const ov::Shape& shape, + const ov::Allocator& allocator) const { + OPENVINO_ASSERT(allocator, "Allocator must be provided when creating a zero tensor!"); + + return std::make_shared(_initStructs, type, shape, allocator); +} + std::vector ZeroInferRequest::get_raw_profiling_data() const { return _profilingQuery.getData(); } @@ -714,10 +749,3 @@ std::shared_ptr& ZeroInferRequest::get_level_zero_input(size_t inde std::vector>& ZeroInferRequest::get_level_zero_inputs(size_t index) const { return _levelZeroInputTensors.at(index); } - -std::optional& ZeroInferRequest::get_input_tensor_data(size_t index, size_t tensorNo) const { - return _inputTensorsData.at(index).at(tensorNo); -} -std::vector>& ZeroInferRequest::get_input_tensors_data(size_t index) const { - return _inputTensorsData.at(index); -} diff --git 
a/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp b/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp index d7f06b813810bb..ef36c98b916311 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp @@ -13,25 +13,41 @@ #include "intel_npu/utils/logger/logger.hpp" #include "intel_npu/utils/zero/zero_api.hpp" #include "intel_npu/utils/zero/zero_types.hpp" +#include "zero_remote_tensor.hpp" + +namespace { + +template +Type extract_object(const ov::AnyMap& params, const ov::Property& p) { + auto itrHandle = params.find(p.name()); + ov::Any res = nullptr; + if (itrHandle == params.end()) { + OPENVINO_THROW("No parameter ", p.name(), " found in parameters map"); + } + res = itrHandle->second; + return res.as(); +} + +} // namespace namespace intel_npu { Pipeline::Pipeline(const Config& config, - const std::shared_ptr& initStructs, + const std::shared_ptr& init_structs, const std::shared_ptr& graph, zeroProfiling::ProfilingPool& profiling_pool, zeroProfiling::ProfilingQuery& profiling_query, const std::shared_ptr& npu_profiling, - const std::vector>>& inputTensorsData, - const std::vector>& outputTensorsData, + const std::vector>>& input_tensors, + const std::vector>& output_tensors, uint32_t group_ordinal) : _graph(graph), _config(config), _id(_graph->get_unique_id()), _number_of_command_lists(_graph->get_batch_size().has_value() ? *_graph->get_batch_size() : 1), _event_pool{ - std::make_shared(initStructs->getDevice(), - initStructs->getContext(), + std::make_shared(init_structs->getDevice(), + init_structs->getContext(), _number_of_command_lists ? 
static_cast(_number_of_command_lists) : 1)}, _npu_profiling(npu_profiling), _logger("Pipeline", _config.get()) { @@ -48,36 +64,62 @@ Pipeline::Pipeline(const Config& config, _logger.debug("Pipeline - emplace_back _event_pool and _command_queue"); for (size_t i = 0; i < _number_of_command_lists; i++) { _command_lists.emplace_back( - std::make_unique(initStructs, + std::make_unique(init_structs, group_ordinal, - initStructs->getMutableCommandListVersion() ? true : false)); + init_structs->getMutableCommandListVersion() ? true : false)); _events.emplace_back(std::make_shared(_event_pool, static_cast(i))); _fences.emplace_back(std::make_unique(*_graph->get_command_queue())); } for (size_t i = 0; i < _number_of_command_lists; i++) { - size_t ioIndex = 0; + size_t io_index = 0; for (const auto& desc : graph->get_input_descriptors()) { - if (inputTensorsData.at(ioIndex).size() > 1) { - graph->set_argument_value(desc.idx, inputTensorsData.at(ioIndex).at(i)->mem); - - ++ioIndex; + if (input_tensors.at(io_index).size() > 1) { + void* data = nullptr; + auto remote_tensor = std::dynamic_pointer_cast(input_tensors.at(io_index).at(i)); + if (remote_tensor == nullptr) { + data = input_tensors.at(io_index).at(i)->data(); + } else { + data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + } + + graph->set_argument_value(desc.idx, data); + + ++io_index; continue; } - graph->set_argument_value(desc.idx, - static_cast(inputTensorsData.at(ioIndex).at(0)->mem) + - (i * inputTensorsData.at(ioIndex).at(0)->size) / _number_of_command_lists); + void* data = nullptr; + auto remote_tensor = std::dynamic_pointer_cast(input_tensors.at(io_index).at(0)); + if (remote_tensor == nullptr) { + data = input_tensors.at(io_index).at(0)->data(); + } else { + data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + } + + graph->set_argument_value( + desc.idx, + static_cast(data) + + (i * input_tensors.at(io_index).at(0)->get_byte_size()) / 
_number_of_command_lists); - ++ioIndex; + ++io_index; } - ioIndex = 0; + io_index = 0; for (const auto& desc : graph->get_output_descriptors()) { - graph->set_argument_value(desc.idx, - static_cast(outputTensorsData.at(ioIndex)->mem) + - (i * outputTensorsData.at(ioIndex)->size) / _number_of_command_lists); - ++ioIndex; + void* data = nullptr; + auto remote_tensor = std::dynamic_pointer_cast(output_tensors.at(io_index)); + if (remote_tensor == nullptr) { + data = output_tensors.at(io_index)->data(); + } else { + data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + } + + graph->set_argument_value( + desc.idx, + static_cast(data) + + (i * output_tensors.at(io_index)->get_byte_size()) / _number_of_command_lists); + ++io_index; } if (_config.get()) { @@ -180,32 +222,54 @@ void Pipeline::reset() const { _logger.debug("Pipeline - rest() completed"); }; -void Pipeline::updateCommandList(const TensorData& tensorsData, uint32_t index) { +void Pipeline::updateCommandList(uint32_t arg_index, const void* arg_data, size_t byte_size) { OV_ITT_TASK_CHAIN(ZERO_EXECUTOR_IP_UMCL, itt::domains::LevelZeroBackend, "Pipeline", "updateCommandList"); _logger.debug("Pipeline - updateCommandList"); - const size_t _number_of_command_lists = _command_lists.size(); + const size_t number_of_command_lists = _command_lists.size(); - for (size_t i = 0; i < _number_of_command_lists; i++) { + for (size_t i = 0; i < number_of_command_lists; i++) { _command_lists.at(i)->updateMutableCommandList( - index, - static_cast(tensorsData.mem) + (i * tensorsData.size) / _number_of_command_lists); + arg_index, + static_cast(arg_data) + (i * byte_size) / number_of_command_lists); + } +}; + +void Pipeline::closeCommandList() { + OV_ITT_TASK_CHAIN(ZERO_EXECUTOR_IP_UMCL, itt::domains::LevelZeroBackend, "Pipeline", "closeCommandList"); + _logger.debug("Pipeline - closeCommandList"); + + const size_t number_of_command_lists = _command_lists.size(); + + for (size_t i = 0; i < 
number_of_command_lists; i++) { _command_lists.at(i)->close(); } }; -void Pipeline::updateCommandList(const TensorData& tensorsData, uint32_t index, size_t commandListIndex) { - OV_ITT_TASK_CHAIN(ZERO_EXECUTOR_IP_UMCL, itt::domains::LevelZeroBackend, "Pipeline", "updateCommandList"); - _logger.debug("Pipeline - updateCommandList"); +void Pipeline::updateCommandListIndex(uint32_t arg_index, const void* arg_data, size_t command_list_index) { + OV_ITT_TASK_CHAIN(ZERO_EXECUTOR_IP_UMCL, itt::domains::LevelZeroBackend, "Pipeline", "updateCommandListIndex"); + _logger.debug("Pipeline - updateCommandListIndex"); + + const size_t number_of_command_lists = _command_lists.size(); + + OPENVINO_ASSERT(command_list_index < number_of_command_lists, + "Command list index is higher than the number of Command lists ", + command_list_index); + + _command_lists.at(command_list_index)->updateMutableCommandList(arg_index, arg_data); +}; + +void Pipeline::closeCommandListIndex(size_t command_list_index) { + OV_ITT_TASK_CHAIN(ZERO_EXECUTOR_IP_UMCL, itt::domains::LevelZeroBackend, "Pipeline", "closeCommandListIndex"); + _logger.debug("Pipeline - closeCommandListIndex"); - const size_t _number_of_command_lists = _command_lists.size(); + const size_t number_of_command_lists = _command_lists.size(); - OPENVINO_ASSERT(commandListIndex < _number_of_command_lists, - "Command list index is higgher than the number of Command lists ", - commandListIndex); + OPENVINO_ASSERT(command_list_index < number_of_command_lists, + "Command list index is higher than the number of Command lists ", + command_list_index); - _command_lists.at(commandListIndex)->updateMutableCommandList(index, tensorsData.mem); - _command_lists.at(commandListIndex)->close(); + _command_lists.at(command_list_index)->close(); }; } // namespace intel_npu diff --git a/src/plugins/intel_npu/src/backend/src/zero_tensor.cpp b/src/plugins/intel_npu/src/backend/src/zero_tensor.cpp new file mode 100644 index 00000000000000..b2b5cc7c9b166e 
--- /dev/null +++ b/src/plugins/intel_npu/src/backend/src/zero_tensor.cpp @@ -0,0 +1,152 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "zero_tensor.hpp" + +#include "openvino/core/type/element_iterator.hpp" +#include "openvino/runtime/properties.hpp" +#include "openvino/runtime/tensor.hpp" + +namespace intel_npu { + +ZeroTensor::ZeroTensor(const std::shared_ptr& init_structs, + const ov::element::Type element_type, + const ov::Shape& shape, + const ov::Allocator& allocator) + : _init_structs(init_structs), + _element_type{element_type}, + _shape{shape}, + _capacity{_shape}, + _strides{}, + _strides_once{}, + _allocator{allocator} { + OPENVINO_ASSERT(_element_type != ov::element::undefined && _element_type.is_static()); + OPENVINO_ASSERT(allocator, "Allocator was not initialized"); + const auto byte_size = ov::element::get_memory_size(_element_type, shape_size(_shape)); + auto data = const_cast(_allocator).allocate(byte_size); + OPENVINO_ASSERT(byte_size == 0 || data != nullptr, "Failed to allocate memory"); + initialize_elements(data, element_type, _shape); + _ptr = data; +} + +void* ZeroTensor::data(const ov::element::Type& element_type) const { + if (element_type != ov::element::undefined && element_type != ov::element::dynamic && + (element_type.bitwidth() != get_element_type().bitwidth() || + element_type.is_real() != get_element_type().is_real() || + (element_type == ov::element::string && get_element_type() != ov::element::string) || + (element_type != ov::element::string && get_element_type() == ov::element::string))) { + OPENVINO_THROW("Tensor data with element type ", + get_element_type(), + ", is not representable as pointer to ", + element_type); + } + return _ptr; +} + +const ov::element::Type& ZeroTensor::get_element_type() const { + return _element_type; +} + +const ov::Shape& ZeroTensor::get_shape() const { + return _shape; +} + +void ZeroTensor::update_strides() const { + if 
(_element_type.bitwidth() < 8) { + return; + } + + auto& shape = get_shape(); + if (_strides.empty() && !shape.empty()) { + _strides.resize(shape.size()); + _strides.back() = shape.back() == 0 ? 0 : _element_type.size(); + std::transform(shape.crbegin(), + shape.crend() - 1, + _strides.rbegin(), + _strides.rbegin() + 1, + std::multiplies()); + } +} + +const ov::Strides& ZeroTensor::get_strides() const { + OPENVINO_ASSERT(_element_type.bitwidth() >= 8, + "Could not get strides for types with bitwidths less then 8 bit. Tensor type: ", + _element_type); + std::call_once(_strides_once, &ZeroTensor::update_strides, this); + return _strides; +} + +void ZeroTensor::initialize_elements(void* data, const ov::element::Type& element_type, const ov::Shape& shape) { + if (element_type == ov::element::Type_t::string) { + auto num_elements = shape_size(shape); + auto string_ptr = static_cast(data); + std::uninitialized_fill_n(string_ptr, num_elements, std::string()); + } +} + +size_t ZeroTensor::get_capacity() const { + return shape_size(_capacity); +} + +size_t ZeroTensor::get_bytes_capacity() const { + return ov::element::get_memory_size(get_element_type(), get_capacity()); +} + +void ZeroTensor::destroy_elements(size_t begin_ind, size_t end_ind) { + // it removes elements from tail + if (get_element_type() == ov::element::Type_t::string) { + auto strings = static_cast(_ptr); + for (size_t ind = begin_ind; ind < end_ind; ++ind) { + using std::string; + strings[ind].~string(); + } + } +} + +void ZeroTensor::destroy_memory() { + destroy_elements(0, get_capacity()); + _allocator.deallocate(_ptr, get_bytes_capacity()); + _ptr = nullptr; +} + +void ZeroTensor::set_shape(ov::Shape new_shape) { + if (_shape == new_shape) { + return; + } + + _shape = std::move(new_shape); + + if (get_size() > get_capacity()) { + if (!_init_structs->getMutableCommandListVersion()) { + OPENVINO_THROW("Re-shaping the tensor with a larger shape is not available using this driver version. 
" + "Please update the driver to the latest version."); + } + + destroy_memory(); + + // allocate buffer and initialize objects from scratch + _capacity = _shape; + _ptr = _allocator.allocate(get_bytes_capacity()); + initialize_elements(_ptr, _element_type, _shape); + + _reset_tensor_memory = true; + } + + _strides.clear(); + update_strides(); +} + +bool ZeroTensor::memory_address_changed() { + return _reset_tensor_memory; +} + +void ZeroTensor::reset_memory_flag() { + _reset_tensor_memory = false; +} + +ZeroTensor::~ZeroTensor() { + destroy_memory(); +} + +} // namespace intel_npu diff --git a/src/plugins/intel_npu/src/common/include/intel_npu/common/remote_tensor.hpp b/src/plugins/intel_npu/src/common/include/intel_npu/common/remote_tensor.hpp index 8a6edeb2e8d4ed..e96f02cd58226e 100644 --- a/src/plugins/intel_npu/src/common/include/intel_npu/common/remote_tensor.hpp +++ b/src/plugins/intel_npu/src/common/include/intel_npu/common/remote_tensor.hpp @@ -24,8 +24,6 @@ class RemoteTensor : public ov::IRemoteTensor { const ov::element::Type& element_type, const ov::Shape& shape); - ~RemoteTensor() override; - /** * @brief Returns additional information associated with tensor * @return Map of property names to properties @@ -40,7 +38,7 @@ class RemoteTensor : public ov::IRemoteTensor { /** * @brief Set new shape for tensor - * @note Memory allocation may happen + * @note Allocation of a bigger tensor is not possible * @param shape A new shape */ void set_shape(ov::Shape shape) override; @@ -70,6 +68,8 @@ class RemoteTensor : public ov::IRemoteTensor { virtual bool deallocate() noexcept = 0; void update_strides(); + virtual ~RemoteTensor(); + std::shared_ptr _context; ov::element::Type _element_type; diff --git a/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp b/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp index 788ce87136a04d..635802900d3a12 100644 --- 
a/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp +++ b/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp @@ -163,6 +163,10 @@ class SyncInferRequest : public ov::IInferRequest { const ov::Allocator& allocator = {}, const std::optional batchSize = std::nullopt) const; + virtual std::shared_ptr create_tensor(ov::element::Type type, + const ov::Shape& shape, + const ov::Allocator& allocator = {}) const; + bool is_batched_input(size_t idx) const; ov::SoPtr& get_user_input(size_t index) const; diff --git a/src/plugins/intel_npu/src/common/src/remote_tensor.cpp b/src/plugins/intel_npu/src/common/src/remote_tensor.cpp index a12ac7dca81710..40804bffed229c 100644 --- a/src/plugins/intel_npu/src/common/src/remote_tensor.cpp +++ b/src/plugins/intel_npu/src/common/src/remote_tensor.cpp @@ -40,19 +40,18 @@ const ov::AnyMap& RemoteTensor::get_properties() const { } void RemoteTensor::set_shape(ov::Shape new_shape) { + if (_shape == new_shape) { + return; + } + _shape = std::move(new_shape); if (ov::shape_size(_shape) > ov::shape_size(_capacity)) { - if (!deallocate()) { - OPENVINO_THROW("Cannot deallocate tensor while an attempt to enlarge tensor area in set_shape."); - } - - const auto byte_size = ov::element::get_memory_size(_element_type, shape_size(_shape)); - allocate(byte_size); - } else { - _strides.clear(); - update_strides(); + OPENVINO_THROW("Cannot set a new bigger shape to this tensor."); } + + _strides.clear(); + update_strides(); } void RemoteTensor::update_strides() { diff --git a/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp b/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp index 1379112b3a7852..fe331a3c6dada0 100644 --- a/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp +++ b/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp @@ -316,10 +316,8 @@ std::shared_ptr SyncInferRequest::allocate_tensor(const IODescripto "The link between state descriptors is 
missing, state name: ", descriptor.nameFromCompiler); tensor = get_user_input(*descriptor.relatedDescriptorIndex)._ptr; - } else if (allocator) { - tensor = ov::make_tensor(descriptor.precision, allocatedTensorShape, allocator); } else { - tensor = ov::make_tensor(descriptor.precision, allocatedTensorShape); + tensor = create_tensor(descriptor.precision, allocatedTensorShape, allocator); } if (isInput) { @@ -337,6 +335,12 @@ std::shared_ptr SyncInferRequest::allocate_tensor(const IODescripto return tensor; } +std::shared_ptr SyncInferRequest::create_tensor(ov::element::Type type, + const ov::Shape& shape, + const ov::Allocator& allocator) const { + return ov::make_tensor(type, shape, allocator); +} + bool SyncInferRequest::is_batched_input(size_t idx) const { return _userInputTensors.at(idx).size() > 1; } diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp index d14f188a18cb4c..a08467bd2a7d99 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp +++ b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp @@ -25,6 +25,12 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTest, ::testing::ValuesIn(configsInferRequestRunTests)), InferRequestRunTests::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTest, + SetShapeInferRunTests, + ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_NPU), + ::testing::ValuesIn(configsInferRequestRunTests)), + InferRequestRunTests::getTestCaseName); + const std::vector batchingConfigs = { {ov::log::level(ov::log::Level::WARNING), ov::intel_npu::batch_mode(ov::intel_npu::BatchMode::PLUGIN)}, {ov::log::level(ov::log::Level::WARNING), ov::intel_npu::batch_mode(ov::intel_npu::BatchMode::COMPILER)}, diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp index 9b63b96ba4e0bc..5f5cf1941d7fc6 
100644 --- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp @@ -24,6 +24,7 @@ #include "openvino/opsets/opset8.hpp" #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/core.hpp" +#include "openvino/runtime/intel_npu/level_zero/level_zero.hpp" #include "overload/overload_test_utils_npu.hpp" using CompilationParams = std::tupleget_default_context(target_device); + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + input = compiled_model.input(); + output = compiled_model.output(); + + ov::Tensor input_tensor, first_output_tensor, second_output_tensor; + auto in_shape = input.get_shape(); + auto out_shape = output.get_shape(); + + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = 5.f; + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + OV_ASSERT_NO_THROW(first_output_tensor = inference_request.get_tensor(output)); + // create dummy Tensors to force the driver to allocate memory for the initial tensor somewhere else + [[maybe_unused]] auto l0_host_dummy_tensor_0 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_1 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_2 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_3 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_4 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_5 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto 
l0_host_dummy_tensor_6 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_7 = context.create_host_tensor(ov::element::f32, dummy_shape); + + auto* actual = first_output_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + EXPECT_NEAR(actual[i], 6.f, 1e-5) << "Expected=6, actual=" << actual[i] << " for index " << i; + } + + // imitates blob reallocation + OV_ASSERT_NO_THROW(input_tensor.set_shape({1, 50, 20, 20})); + OV_ASSERT_NO_THROW(input_tensor.set_shape(in_shape)); + + OV_ASSERT_NO_THROW(second_output_tensor = inference_request.get_tensor(output)); + OV_ASSERT_NO_THROW(second_output_tensor.set_shape({1, 20, 20, 20})); + OV_ASSERT_NO_THROW(second_output_tensor.set_shape(out_shape)); + + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = 9.f; + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + OV_ASSERT_NO_THROW(second_output_tensor = inference_request.get_tensor(output)); + + actual = second_output_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + EXPECT_NEAR(actual[i], 10.f, 1e-5) << "Expected=10, actual=" << actual[i] << " for index " << i; + } +} + } // namespace behavior } // namespace test } // namespace ov diff --git a/src/plugins/intel_npu/tests/functional/internal/overload/ov_infer_request/io_tensor.hpp b/src/plugins/intel_npu/tests/functional/internal/overload/ov_infer_request/io_tensor.hpp index eb8bc5bd6362de..e4539d1af091f6 100644 --- a/src/plugins/intel_npu/tests/functional/internal/overload/ov_infer_request/io_tensor.hpp +++ b/src/plugins/intel_npu/tests/functional/internal/overload/ov_infer_request/io_tensor.hpp @@ -5,6 +5,7 @@ #pragma once #include + #include "behavior/ov_infer_request/io_tensor.hpp" #include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "overload/overload_test_utils_npu.hpp" @@ -160,6 +161,10 @@ 
TEST_P(OVInferRequestIOTensorTestNPU, canInferAfterIOBlobReallocation) { auto in_shape = input.get_shape(); auto out_shape = output.get_shape(); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + // imitates blob reallocation OV_ASSERT_NO_THROW(input_tensor = req.get_tensor(input)); OV_ASSERT_NO_THROW(input_tensor.set_shape({5, 5, 5, 5})); diff --git a/src/plugins/intel_npu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/intel_npu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index 9275e8b1dd2573..1cf7a945ad4928 100644 --- a/src/plugins/intel_npu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/intel_npu/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -22,31 +22,31 @@ const std::vector multiConfigs = { const std::vector autoConfigs = { {ov::device::priorities(ov::test::utils::DEVICE_NPU), ov::device::properties(ov::test::utils::DEVICE_NPU, {})}}; -INSTANTIATE_TEST_SUITE_P(compatibility_smoke_BehaviorTests, +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_NPU), ::testing::ValuesIn(configs)), InferRequestParamsAnyMapTestName::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(compatibility_smoke_Multi_BehaviorTests, +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), ::testing::ValuesIn(multiConfigs)), InferRequestParamsAnyMapTestName::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(compatibility_smoke_BehaviorTests, +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTestNPU, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_NPU), ::testing::ValuesIn(configs)), InferRequestParamsAnyMapTestName::getTestCaseName); 
-INSTANTIATE_TEST_SUITE_P(compatibility_smoke_Multi_BehaviorTests, +INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTestNPU, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_MULTI), ::testing::ValuesIn(multiConfigs)), InferRequestParamsAnyMapTestName::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(compatibility_smoke_Auto_BehaviorTests, +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO), ::testing::ValuesIn(autoConfigs)), diff --git a/src/plugins/intel_npu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_npu/tests/functional/shared_tests_instances/skip_tests_config.cpp index d62747be75c32d..134ea3dd36bfea 100644 --- a/src/plugins/intel_npu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_npu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -295,13 +295,6 @@ std::vector disabledTestPatterns() { ".*CompilationForSpecificPlatform.*(3800|3900).*", }); - // [Track number: E#67741] - _skipRegistry.addPatterns( - "Cannot call setShape for Blobs", { - R"(.*(smoke_Behavior|smoke_Auto_Behavior|smoke_Multi_Behavior).*OVInferRequestIOTensorTest.*canInferAfterIOBlobReallocation.*)", - R"(.*(smoke_Behavior|smoke_Auto_Behavior|smoke_Multi_Behavior).*OVInferRequestIOTensorTest.*InferStaticNetworkSetChangedInputTensorThrow.*targetDevice=(NPU_|MULTI_configItem=MULTI_DEVICE_PRIORITIES_NPU).*)" - }); - // [Track number: E#67749] _skipRegistry.addPatterns( "Can't loadNetwork without cache for ReadConcatSplitAssign with precision f32", { From d2b135b77b1703705b328dab9932b7971597c049 Mon Sep 17 00:00:00 2001 From: Nesterov Alexander Date: Mon, 13 Jan 2025 09:04:25 +0100 Subject: [PATCH 2/3] [ARM] Fix build OneDNN for ACL (#28355) ### Tickets: - CVS-160371 OneDNN PR: https://github.com/openvinotoolkit/oneDNN/pull/269 --- src/plugins/intel_cpu/thirdparty/onednn | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_cpu/thirdparty/onednn b/src/plugins/intel_cpu/thirdparty/onednn index ec3d68916fb1b0..1efdaaa9bbada4 160000 --- a/src/plugins/intel_cpu/thirdparty/onednn +++ b/src/plugins/intel_cpu/thirdparty/onednn @@ -1 +1 @@ -Subproject commit ec3d68916fb1b03554e00b35b568874ba56093b6 +Subproject commit 1efdaaa9bbada4918c218d4d17a9f9d47a6cbd96 From d4c2e6bbfd27e7f627401a1d509b892c7cec6a30 Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Mon, 13 Jan 2025 09:20:21 +0100 Subject: [PATCH 3/3] [RTTI] Add third argument to RTTI definition (#28394) ### Details: - Added missing parent argument RTTI definition in Core, Frontends and Plugins. ### Tickets: - CVS-160238 --------- Signed-off-by: Tomasz Jankowski --- .../include/low_precision/add.hpp | 2 +- .../low_precision/assign_and_read_value.hpp | 2 +- .../include/low_precision/avg_pool.hpp | 2 +- .../include/low_precision/batch_to_space.hpp | 2 +- .../include/low_precision/broadcast.hpp | 2 +- .../include/low_precision/clamp.hpp | 2 +- .../include/low_precision/concat.hpp | 2 +- .../include/low_precision/convert.hpp | 2 +- .../include/low_precision/convolution.hpp | 2 +- .../include/low_precision/depth_to_space.hpp | 2 +- .../low_precision/eliminate_fake_quantize.hpp | 2 +- .../include/low_precision/fake_quantize.hpp | 2 +- .../low_precision/fake_quantize_decomposition.hpp | 2 +- .../include/low_precision/fold_convert.hpp | 2 +- .../include/low_precision/fold_fake_quantize.hpp | 2 +- .../include/low_precision/fuse_convert.hpp | 2 +- .../fuse_multiply_to_fake_quantize.hpp | 2 +- .../fuse_subtract_to_fake_quantize.hpp | 2 +- .../include/low_precision/gather.hpp | 2 +- .../include/low_precision/group_convolution.hpp | 2 +- .../include/low_precision/interpolate.hpp | 2 +- .../include/low_precision/mat_mul.hpp | 2 +- .../include/low_precision/max_pool.hpp | 2 +- .../include/low_precision/move_fake_quantize.hpp | 2 +- .../include/low_precision/multiply.hpp | 2 +- 
.../include/low_precision/multiply_partial.hpp | 2 +- .../multiply_to_group_convolution.hpp | 2 +- .../include/low_precision/mvn.hpp | 2 +- .../include/low_precision/normalize_l2.hpp | 2 +- .../include/low_precision/pad.hpp | 2 +- .../include/low_precision/prelu.hpp | 2 +- .../include/low_precision/recurrent_cell.hpp | 2 +- .../include/low_precision/reduce_max.hpp | 2 +- .../include/low_precision/reduce_mean.hpp | 2 +- .../include/low_precision/reduce_min.hpp | 2 +- .../include/low_precision/reduce_sum.hpp | 2 +- .../include/low_precision/relu.hpp | 2 +- .../include/low_precision/reshape.hpp | 2 +- .../include/low_precision/shuffle_channels.hpp | 2 +- .../include/low_precision/slice.hpp | 2 +- .../include/low_precision/space_to_batch.hpp | 2 +- .../include/low_precision/split.hpp | 2 +- .../include/low_precision/squeeze.hpp | 2 +- .../include/low_precision/strided_slice.hpp | 2 +- .../include/low_precision/subtract.hpp | 2 +- .../include/low_precision/transpose.hpp | 2 +- .../include/low_precision/unsqueeze.hpp | 2 +- .../include/low_precision/variadic_split.hpp | 2 +- .../snippets/lowered/pass/allocate_buffers.hpp | 2 +- .../snippets/lowered/pass/assign_registers.hpp | 2 +- .../snippets/lowered/pass/brgemm_blocking.hpp | 2 +- .../lowered/pass/clean_repeated_ptr_shifts.hpp | 2 +- .../snippets/lowered/pass/cleanup_loop_offsets.hpp | 2 +- .../pass/compute_buffer_allocation_size.hpp | 2 +- .../lowered/pass/define_buffer_clusters.hpp | 2 +- .../lowered/pass/extract_loop_invariants.hpp | 2 +- .../include/snippets/lowered/pass/fuse_loops.hpp | 2 +- .../snippets/lowered/pass/init_buffers_default.hpp | 2 +- .../include/snippets/lowered/pass/init_loops.hpp | 2 +- .../snippets/lowered/pass/insert_broadcastmove.hpp | 2 +- .../snippets/lowered/pass/insert_buffers.hpp | 2 +- .../snippets/lowered/pass/insert_load_store.hpp | 2 +- .../include/snippets/lowered/pass/insert_loops.hpp | 2 +- .../snippets/lowered/pass/insert_perf_count.hpp | 2 +- 
.../lowered/pass/insert_specific_iterations.hpp | 2 +- .../include/snippets/lowered/pass/iter_handler.hpp | 6 +++--- .../pass/load_movebroadcast_to_broadcastload.hpp | 2 +- .../lowered/pass/mark_invariant_shape_path.hpp | 2 +- .../include/snippets/lowered/pass/mark_loops.hpp | 2 +- .../lowered/pass/move_result_out_of_loop.hpp | 2 +- .../lowered/pass/move_scalar_to_consumer.hpp | 2 +- .../lowered/pass/normalize_buffer_reg_groups.hpp | 2 +- .../snippets/lowered/pass/normalize_loop_ids.hpp | 2 +- .../snippets/lowered/pass/optimize_domain.hpp | 2 +- .../pass/optimize_loop_single_evaluation.hpp | 2 +- .../include/snippets/lowered/pass/pass.hpp | 5 ++++- .../lowered/pass/propagate_buffer_offset.hpp | 2 +- .../snippets/lowered/pass/propagate_subtensors.hpp | 2 +- .../snippets/lowered/pass/reduce_decomposition.hpp | 2 +- .../snippets/lowered/pass/serialize_base.hpp | 2 +- .../snippets/lowered/pass/set_buffer_reg_group.hpp | 2 +- .../lowered/pass/set_load_store_scalar.hpp | 2 +- .../snippets/lowered/pass/solve_buffer_memory.hpp | 2 +- .../include/snippets/lowered/pass/split_loops.hpp | 4 ++-- .../include/snippets/lowered/pass/validate.hpp | 2 +- .../snippets/lowered/pass/validate_buffers.hpp | 2 +- .../lowered/pass/validate_expanded_loops.hpp | 2 +- .../snippets/lowered/pass/validate_shapes.hpp | 2 +- .../lowered/pass/validate_unified_loops.hpp | 2 +- .../fold_subgraph_empty_inputs.hpp | 2 +- .../mark_precision_sensitive_shapeof_subgraphs.hpp | 6 +++--- .../remove_concat_zero_dim_input.hpp | 2 +- .../common_optimizations/strides_optimization.hpp | 2 +- .../op_conversions/convert_reduce_to_pooling.hpp | 6 +++--- .../op_conversions/convert_reduce_to_reshape.hpp | 14 +++++++------- .../transformations/rt_info/decompression.hpp | 2 +- .../rt_info/dequantization_node.hpp | 2 +- .../rt_info/disable_fp16_compression.hpp | 2 +- .../rt_info/fused_names_attribute.hpp | 7 ++++--- .../transformations/rt_info/is_shape_subgraph.hpp | 2 +- .../rt_info/keep_const_precision.hpp | 2 +- 
.../rt_info/nms_selected_indices.hpp | 2 +- .../rt_info/nonconvertible_divide.hpp | 2 +- .../rt_info/old_api_map_element_type_attribute.hpp | 2 +- .../rt_info/old_api_map_order_attribute.hpp | 2 +- .../rt_info/original_precision_attribute.hpp | 2 +- .../rt_info/preprocessing_attribute.hpp | 2 +- .../rt_info/primitives_priority_attribute.hpp | 2 +- .../transformations/rt_info/strides_property.hpp | 2 +- .../rt_info/transpose_sinking_attr.hpp | 2 +- .../transpose_sinking/ts_binary.hpp | 2 +- .../transpose_sinking/ts_concat.hpp | 2 +- .../transpose_sinking/ts_cumsum.hpp | 2 +- .../transpose_sinking/ts_data_movement.hpp | 2 +- .../transpose_sinking/ts_gather.hpp | 2 +- .../transpose_sinking/ts_interpolate.hpp | 2 +- .../transpose_sinking/ts_reduction.hpp | 2 +- .../transpose_sinking/ts_shape_of.hpp | 2 +- .../transformations/transpose_sinking/ts_slice.hpp | 2 +- .../transformations/transpose_sinking/ts_split.hpp | 2 +- .../transpose_sinking/ts_squeeze.hpp | 2 +- .../transformations/transpose_sinking/ts_tile.hpp | 2 +- .../transformations/transpose_sinking/ts_unary.hpp | 2 +- .../transpose_sinking/ts_unsqueeze.hpp | 2 +- .../common_optimizations/nop_elimination.cpp | 2 +- .../mark_subgraphs_to_keep_in_mixed_precision.cpp | 2 +- .../core/rt_info/weightless_caching_attributes.hpp | 2 +- src/core/include/openvino/core/layout.hpp | 2 +- .../openvino/core/preprocess/input_tensor_info.hpp | 2 +- .../op/util/precision_sensitive_attribute.hpp | 2 +- .../include/openvino/op/util/symbolic_info.hpp | 2 +- .../include/openvino/pass/constant_folding.hpp | 2 +- src/core/src/pass/constant_folding.cpp | 2 +- src/core/tests/copy_runtime_info.cpp | 6 +++--- .../include/openvino/frontend/graph_iterator.hpp | 2 +- src/frontends/tensorflow/src/tf_utils.hpp | 2 +- .../frontend/tensorflow_lite/quantization_info.hpp | 2 +- .../frontend/tensorflow_lite/sparsity_info.hpp | 3 ++- .../cpu_opset/arm/pass/convert_group_conv1d.hpp | 4 ++-- .../arm/pass/convert_reduce_multi_axis.hpp | 8 ++++---- 
.../arm/pass/convert_reduce_no_keep_dims.hpp | 2 +- .../pass/move_readvalue_inputs_to_subgraph.hpp | 2 +- .../cpu_opset/common/pass/stateful_sdpa_fusion.hpp | 2 +- .../cpu_opset/x64/pass/mha_fusion.hpp | 8 ++++---- .../lowered/adjust_brgemm_copy_b_loop_ports.hpp | 2 +- .../x64/pass/lowered/brgemm_cpu_blocking.hpp | 4 ++-- .../pass/lowered/fuse_load_store_and_convert.hpp | 2 +- .../tpp/x64/pass/lowered/brgemm_tpp_blocking.hpp | 6 ++++-- .../tpp/x64/pass/lowered/set_tpp_leading_dim.hpp | 4 ++-- .../transformations/lora_horizontal_fusion.hpp | 2 +- 150 files changed, 184 insertions(+), 177 deletions(-) diff --git a/src/common/low_precision_transformations/include/low_precision/add.hpp b/src/common/low_precision_transformations/include/low_precision/add.hpp index c71e2456a1cd94..ce90e1444a7a33 100644 --- a/src/common/low_precision_transformations/include/low_precision/add.hpp +++ b/src/common/low_precision_transformations/include/low_precision/add.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API AddTransformation : public EltwiseBaseTransformation { public: - OPENVINO_RTTI("AddTransformation", "0"); + OPENVINO_RTTI("AddTransformation", "0", EltwiseBaseTransformation); AddTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp b/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp index a5e299e472066c..c351b8bb859a45 100644 --- a/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp +++ b/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp @@ -13,7 +13,7 @@ namespace low_precision { class LP_TRANSFORMATIONS_API AssignAndReadValueTransformation : public 
LayerTransformation { public: - OPENVINO_RTTI("AssignAndReadValueTransformation", "0"); + OPENVINO_RTTI("AssignAndReadValueTransformation", "0", LayerTransformation); AssignAndReadValueTransformation(const std::shared_ptr model, const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp b/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp index 43138351cf9957..4806d94187c694 100644 --- a/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp +++ b/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API AvgPoolTransformation : public LayerTransformation { public: - OPENVINO_RTTI("AvgPoolTransformation", "0"); + OPENVINO_RTTI("AvgPoolTransformation", "0", LayerTransformation); AvgPoolTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp index 0d198565dff23f..7859a29ec3a046 100644 --- a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp +++ b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API BatchToSpaceTransformation : public LayerTransformation { public: - OPENVINO_RTTI("BatchToSpaceTransformation", "0"); + OPENVINO_RTTI("BatchToSpaceTransformation", "0", LayerTransformation); BatchToSpaceTransformation(const Params& params = 
Params()); bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/broadcast.hpp b/src/common/low_precision_transformations/include/low_precision/broadcast.hpp index 39ba4052535c29..05f7cadb88e888 100644 --- a/src/common/low_precision_transformations/include/low_precision/broadcast.hpp +++ b/src/common/low_precision_transformations/include/low_precision/broadcast.hpp @@ -20,7 +20,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API BroadcastTransformation : public TransparentBaseTransformation { public: - OPENVINO_RTTI("BroadcastTransformation", "0"); + OPENVINO_RTTI("BroadcastTransformation", "0", TransparentBaseTransformation); BroadcastTransformation(const Params& params = Params()); bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/clamp.hpp b/src/common/low_precision_transformations/include/low_precision/clamp.hpp index 0aa2ab3bd8259e..237b956238e809 100644 --- a/src/common/low_precision_transformations/include/low_precision/clamp.hpp +++ b/src/common/low_precision_transformations/include/low_precision/clamp.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ClampTransformation : public LayerTransformation { public: - OPENVINO_RTTI("ClampTransformation", "0"); + OPENVINO_RTTI("ClampTransformation", "0", LayerTransformation); ClampTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/concat.hpp 
b/src/common/low_precision_transformations/include/low_precision/concat.hpp index c178cfe82ecd8a..9d5ef8fc84f1fe 100644 --- a/src/common/low_precision_transformations/include/low_precision/concat.hpp +++ b/src/common/low_precision_transformations/include/low_precision/concat.hpp @@ -29,7 +29,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ConcatTransformation : public LayerTransformation { public: - OPENVINO_RTTI("ConcatTransformation", "0"); + OPENVINO_RTTI("ConcatTransformation", "0", LayerTransformation); ConcatTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/convert.hpp b/src/common/low_precision_transformations/include/low_precision/convert.hpp index 70fb76107f050d..930d7654be04bb 100644 --- a/src/common/low_precision_transformations/include/low_precision/convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convert.hpp @@ -13,7 +13,7 @@ namespace low_precision { class LP_TRANSFORMATIONS_API ConvertTransformation : public LayerTransformation { public: - OPENVINO_RTTI("ConvertTransformation", "0"); + OPENVINO_RTTI("ConvertTransformation", "0", LayerTransformation); ConvertTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/convolution.hpp b/src/common/low_precision_transformations/include/low_precision/convolution.hpp index 0d687325692306..d91d3dd9747240 100644 --- a/src/common/low_precision_transformations/include/low_precision/convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convolution.hpp @@ -20,7 +20,7 @@ namespace 
low_precision { */ class LP_TRANSFORMATIONS_API ConvolutionTransformation : public WeightableLayerTransformation { public: - OPENVINO_RTTI("ConvolutionTransformation", "0"); + OPENVINO_RTTI("ConvolutionTransformation", "0", WeightableLayerTransformation); ConvolutionTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isQuantized(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp index 0b8c2f6a4f38e2..2e8515c49a01c6 100644 --- a/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp +++ b/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp @@ -20,7 +20,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API DepthToSpaceTransformation : public TransparentBaseTransformation { public: - OPENVINO_RTTI("DepthToSpaceTransformation", "0"); + OPENVINO_RTTI("DepthToSpaceTransformation", "0", TransparentBaseTransformation); DepthToSpaceTransformation(const Params& params = Params()); bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp index 2ad435983364dc..bfaa0c3b3a2b1b 100644 --- a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API EliminateFakeQuantizeTransformation : public CleanupTransformation { public: - OPENVINO_RTTI("EliminateFakeQuantizeTransformation", "0"); + 
OPENVINO_RTTI("EliminateFakeQuantizeTransformation", "0", CleanupTransformation); EliminateFakeQuantizeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp index 7d1fd56940f65d..554133df6cb205 100644 --- a/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API FakeQuantizeTransformation : public LayerTransformation { public: - OPENVINO_RTTI("FakeQuantizeTransformation", "0"); + OPENVINO_RTTI("FakeQuantizeTransformation", "0", LayerTransformation); FakeQuantizeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp b/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp index bb898e37dd41c9..393e1bb28a2a49 100644 --- a/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp @@ -23,7 +23,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API FakeQuantizeDecompositionTransformation : public LayerTransformation { public: - OPENVINO_RTTI("FakeQuantizeDecompositionTransformation", "0"); + OPENVINO_RTTI("FakeQuantizeDecompositionTransformation", "0", LayerTransformation); 
FakeQuantizeDecompositionTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp index f2eff4ec0bb9ad..e773f0c8d659c2 100644 --- a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp @@ -23,7 +23,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API FoldConvertTransformation : public CleanupTransformation { public: - OPENVINO_RTTI("FoldConvertTransformation", "0"); + OPENVINO_RTTI("FoldConvertTransformation", "0", CleanupTransformation); FoldConvertTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp index 545c2eb430f1c5..75f0c1c80eeeee 100644 --- a/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API FoldFakeQuantizeTransformation : public LayerTransformation { public: - OPENVINO_RTTI("FoldFakeQuantizeTransformation", "0"); + OPENVINO_RTTI("FoldFakeQuantizeTransformation", "0", LayerTransformation); FoldFakeQuantizeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool 
canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp index bbcc934d305ca6..00c6659c50a45e 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp @@ -20,7 +20,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API FuseConvertTransformation : public CleanupTransformation { public: - OPENVINO_RTTI("FuseConvertTransformation", "0"); + OPENVINO_RTTI("FuseConvertTransformation", "0", CleanupTransformation); FuseConvertTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp index 0bb4f874c16af2..173ec9a86b8e25 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API FuseMultiplyToFakeQuantizeTransformation : public FuseElementwiseToFakeQuantizeTransformation { public: - OPENVINO_RTTI("FuseMultiplyToFakeQuantizeTransformation", "0"); + OPENVINO_RTTI("FuseMultiplyToFakeQuantizeTransformation", "0", FuseElementwiseToFakeQuantizeTransformation); FuseMultiplyToFakeQuantizeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool 
isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp index 74e5ce299f6296..72851801ae644f 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API FuseSubtractToFakeQuantizeTransformation : public FuseElementwiseToFakeQuantizeTransformation { public: - OPENVINO_RTTI("FuseSubtractToFakeQuantizeTransformation", "0"); + OPENVINO_RTTI("FuseSubtractToFakeQuantizeTransformation", "0", FuseElementwiseToFakeQuantizeTransformation); FuseSubtractToFakeQuantizeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/gather.hpp b/src/common/low_precision_transformations/include/low_precision/gather.hpp index 16cf0fabd5427f..6aebd3fb094e0a 100644 --- a/src/common/low_precision_transformations/include/low_precision/gather.hpp +++ b/src/common/low_precision_transformations/include/low_precision/gather.hpp @@ -13,7 +13,7 @@ namespace low_precision { class LP_TRANSFORMATIONS_API GatherTransformation : public LayerTransformation { public: - OPENVINO_RTTI("GatherTransformation", "0"); + OPENVINO_RTTI("GatherTransformation", "0", LayerTransformation); GatherTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const override; diff --git 
a/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp index b51b7cb2a8ac84..b8a967e28bde7f 100644 --- a/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API GroupConvolutionTransformation : public ConvolutionTransformation { public: - OPENVINO_RTTI("GroupConvolutionTransformation", "0"); + OPENVINO_RTTI("GroupConvolutionTransformation", "0", ConvolutionTransformation); GroupConvolutionTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isQuantized(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/include/low_precision/interpolate.hpp b/src/common/low_precision_transformations/include/low_precision/interpolate.hpp index ad8b3268d05ea6..20bd5f5ba1af66 100644 --- a/src/common/low_precision_transformations/include/low_precision/interpolate.hpp +++ b/src/common/low_precision_transformations/include/low_precision/interpolate.hpp @@ -20,7 +20,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API InterpolateTransformation : public LayerTransformation { public: - OPENVINO_RTTI("InterpolateTransformation", "0"); + OPENVINO_RTTI("InterpolateTransformation", "0", LayerTransformation); InterpolateTransformation(const Params& params = Params()); bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp b/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp index bc0077a716f701..aef76d2b591e9a 100644 --- 
a/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp +++ b/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API MatMulTransformation : public LayerTransformation { public: - OPENVINO_RTTI("MatMulTransformation", "0"); + OPENVINO_RTTI("MatMulTransformation", "0", LayerTransformation); MatMulTransformation(const Params& params = Params()); bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/max_pool.hpp b/src/common/low_precision_transformations/include/low_precision/max_pool.hpp index 16b3fb659173e1..73b8b9b097a291 100644 --- a/src/common/low_precision_transformations/include/low_precision/max_pool.hpp +++ b/src/common/low_precision_transformations/include/low_precision/max_pool.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API MaxPoolTransformation : public LayerTransformation { public: - OPENVINO_RTTI("MaxPoolTransformation", "0"); + OPENVINO_RTTI("MaxPoolTransformation", "0", LayerTransformation); MaxPoolTransformation(const Params& params = Params()); bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp index 169a4261a4aeaa..ba3740ecb417ec 100644 --- a/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp @@ -14,7 +14,7 @@ namespace low_precision { class LP_TRANSFORMATIONS_API MoveFakeQuantize : 
public LayerTransformation { public: - OPENVINO_RTTI("MoveFakeQuantize", "0"); + OPENVINO_RTTI("MoveFakeQuantize", "0", LayerTransformation); MoveFakeQuantize(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/multiply.hpp b/src/common/low_precision_transformations/include/low_precision/multiply.hpp index 576cb7b23b6080..ef6f942448ca12 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API MultiplyTransformation : public WeightableLayerTransformation { public: - OPENVINO_RTTI("MultiplyTransformation", "0"); + OPENVINO_RTTI("MultiplyTransformation", "0", WeightableLayerTransformation); MultiplyTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp index f91f54cd1ad0df..0880d914d82b20 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API MultiplyPartialTransformation : public EltwiseBaseTransformation { public: - OPENVINO_RTTI("MultiplyPartialTransformation", "0"); + OPENVINO_RTTI("MultiplyPartialTransformation", "0", EltwiseBaseTransformation); MultiplyPartialTransformation(const Params& params = Params()); bool transform(TransformationContext& context, 
ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp index b2091d44fdd8be..621813a373df58 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API MultiplyToGroupConvolutionTransformation : public CleanupTransformation { public: - OPENVINO_RTTI("MultiplyToGroupConvolutionTransformation", "0"); + OPENVINO_RTTI("MultiplyToGroupConvolutionTransformation", "0", CleanupTransformation); MultiplyToGroupConvolutionTransformation( const Params& params = Params(), const PrecisionsRestriction::PrecisionsByPorts& restrictions = {}); diff --git a/src/common/low_precision_transformations/include/low_precision/mvn.hpp b/src/common/low_precision_transformations/include/low_precision/mvn.hpp index f875e32d96e87a..3a3f087dba7c78 100644 --- a/src/common/low_precision_transformations/include/low_precision/mvn.hpp +++ b/src/common/low_precision_transformations/include/low_precision/mvn.hpp @@ -20,7 +20,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API MVNTransformation : public LayerTransformation { public: - OPENVINO_RTTI("MVNTransformation", "0"); + OPENVINO_RTTI("MVNTransformation", "0", LayerTransformation); MVNTransformation(const Params& params = Params()); bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp 
b/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp index 5d31067bfbb5d9..1976a3f1e610bf 100644 --- a/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp +++ b/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp @@ -20,7 +20,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API NormalizeL2Transformation : public LayerTransformation { public: - OPENVINO_RTTI("NormalizeL2Transformation", "0"); + OPENVINO_RTTI("NormalizeL2Transformation", "0", LayerTransformation); NormalizeL2Transformation(const Params& params = Params()); bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/pad.hpp b/src/common/low_precision_transformations/include/low_precision/pad.hpp index 8eccf9f87e7c7e..f2aef2994f2484 100644 --- a/src/common/low_precision_transformations/include/low_precision/pad.hpp +++ b/src/common/low_precision_transformations/include/low_precision/pad.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API PadTransformation : public LayerTransformation { public: - OPENVINO_RTTI("PadTransformation", "0"); + OPENVINO_RTTI("PadTransformation", "0", LayerTransformation); PadTransformation(const Params& params = Params()); bool transform(TransformationContext& context, pattern::Matcher& m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/prelu.hpp b/src/common/low_precision_transformations/include/low_precision/prelu.hpp index 6a712b3e77306c..983aee4fcd4136 100644 --- a/src/common/low_precision_transformations/include/low_precision/prelu.hpp +++ b/src/common/low_precision_transformations/include/low_precision/prelu.hpp @@ 
-22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API PReluTransformation : public LayerTransformation { public: - OPENVINO_RTTI("PReluTransformation", "0"); + OPENVINO_RTTI("PReluTransformation", "0", LayerTransformation); PReluTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp b/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp index 22aaf3281c2b94..fc0401b08dd74e 100644 --- a/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp +++ b/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp @@ -14,7 +14,7 @@ namespace low_precision { class LP_TRANSFORMATIONS_API RecurrentCellTransformation : public LayerTransformation { public: - OPENVINO_RTTI("RecurrentCellTransformation", "0"); + OPENVINO_RTTI("RecurrentCellTransformation", "0", LayerTransformation); RecurrentCellTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp index 813d5d7a4a6965..19721c4721f545 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp @@ -24,7 +24,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ReduceMaxTransformation : public ReduceBaseTransformation { public: - OPENVINO_RTTI("ReduceMaxTransformation", "0"); + OPENVINO_RTTI("ReduceMaxTransformation", "0", 
ReduceBaseTransformation); ReduceMaxTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp index 4bf3a98a628522..c295143d3fe7ee 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp @@ -24,7 +24,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ReduceMeanTransformation : public ReduceBaseTransformation { public: - OPENVINO_RTTI("ReduceMeanTransformation", "0"); + OPENVINO_RTTI("ReduceMeanTransformation", "0", ReduceBaseTransformation); ReduceMeanTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp index 0f82cfe52d7bc6..5da128c79c522e 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp @@ -24,7 +24,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ReduceMinTransformation : public ReduceBaseTransformation { public: - OPENVINO_RTTI("ReduceMinTransformation", "0"); + OPENVINO_RTTI("ReduceMinTransformation", "0", ReduceBaseTransformation); ReduceMinTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; bool canBeTransformed(const TransformationContext& context, 
std::shared_ptr reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp index 2e6ab0505457cf..db296612aac04a 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp @@ -24,7 +24,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ReduceSumTransformation : public ReduceBaseTransformation { public: - OPENVINO_RTTI("ReduceSumTransformation", "0"); + OPENVINO_RTTI("ReduceSumTransformation", "0", ReduceBaseTransformation); ReduceSumTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/relu.hpp b/src/common/low_precision_transformations/include/low_precision/relu.hpp index 3c2a6dbb11422b..75d9141e80ac78 100644 --- a/src/common/low_precision_transformations/include/low_precision/relu.hpp +++ b/src/common/low_precision_transformations/include/low_precision/relu.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ReluTransformation : public LayerTransformation { public: - OPENVINO_RTTI("ReluTransformation", "0"); + OPENVINO_RTTI("ReluTransformation", "0", LayerTransformation); ReluTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/reshape.hpp b/src/common/low_precision_transformations/include/low_precision/reshape.hpp index b5885ccbbaa7de..072263f59a8a94 100644 --- 
a/src/common/low_precision_transformations/include/low_precision/reshape.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reshape.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ReshapeTransformation : public LayerTransformation { public: - OPENVINO_RTTI("ReshapeTransformation", "0"); + OPENVINO_RTTI("ReshapeTransformation", "0", LayerTransformation); ReshapeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp b/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp index 7163269a5c0ad1..638a2580ade39f 100644 --- a/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp +++ b/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API ShuffleChannelsTransformation : public LayerTransformation { public: - OPENVINO_RTTI("ShuffleChannelsTransformation", "0"); + OPENVINO_RTTI("ShuffleChannelsTransformation", "0", LayerTransformation); ShuffleChannelsTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/slice.hpp b/src/common/low_precision_transformations/include/low_precision/slice.hpp index 206a07d7bca9c7..c00028f0d71169 100644 --- a/src/common/low_precision_transformations/include/low_precision/slice.hpp +++ b/src/common/low_precision_transformations/include/low_precision/slice.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API SliceTransformation : 
public LayerTransformation { public: - OPENVINO_RTTI("SliceTransformation", "0"); + OPENVINO_RTTI("SliceTransformation", "0", LayerTransformation); SliceTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp b/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp index c381f9229c4455..e05353aaf24d1c 100644 --- a/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp +++ b/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API SpaceToBatchTransformation : public LayerTransformation { public: - OPENVINO_RTTI("SpaceToBatchTransformation", "0"); + OPENVINO_RTTI("SpaceToBatchTransformation", "0", LayerTransformation); SpaceToBatchTransformation(const Params& params = Params()); bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/split.hpp b/src/common/low_precision_transformations/include/low_precision/split.hpp index 323ebcb3e3cabe..2e3db010801b03 100644 --- a/src/common/low_precision_transformations/include/low_precision/split.hpp +++ b/src/common/low_precision_transformations/include/low_precision/split.hpp @@ -23,7 +23,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API SplitTransformation : public LayerTransformation { public: - OPENVINO_RTTI("SplitTransformation", "0"); + OPENVINO_RTTI("SplitTransformation", "0", LayerTransformation); SplitTransformation(const Params& params = Params()); bool transform(TransformationContext& 
context, ov::pass::pattern::Matcher& m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/squeeze.hpp b/src/common/low_precision_transformations/include/low_precision/squeeze.hpp index c46111b95f7d4c..8709250ed85104 100644 --- a/src/common/low_precision_transformations/include/low_precision/squeeze.hpp +++ b/src/common/low_precision_transformations/include/low_precision/squeeze.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API SqueezeTransformation : public LayerTransformation { public: - OPENVINO_RTTI("SqueezeTransformation", "0"); + OPENVINO_RTTI("SqueezeTransformation", "0", LayerTransformation); SqueezeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp b/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp index b00d520b465b1c..730531dd93f669 100644 --- a/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp +++ b/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API StridedSliceTransformation : public LayerTransformation { public: - OPENVINO_RTTI("StridedSliceTransformation", "0"); + OPENVINO_RTTI("StridedSliceTransformation", "0", LayerTransformation); StridedSliceTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/subtract.hpp 
b/src/common/low_precision_transformations/include/low_precision/subtract.hpp index 07fdafcc4dd095..9bb9a3c94dfc95 100644 --- a/src/common/low_precision_transformations/include/low_precision/subtract.hpp +++ b/src/common/low_precision_transformations/include/low_precision/subtract.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API SubtractTransformation : public LayerTransformation { public: - OPENVINO_RTTI("SubtractTransformation", "0"); + OPENVINO_RTTI("SubtractTransformation", "0", LayerTransformation); SubtractTransformation(const Params& params); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/transpose.hpp b/src/common/low_precision_transformations/include/low_precision/transpose.hpp index 95a00cd5220f14..08e1dde9cbab8f 100644 --- a/src/common/low_precision_transformations/include/low_precision/transpose.hpp +++ b/src/common/low_precision_transformations/include/low_precision/transpose.hpp @@ -22,7 +22,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API TransposeTransformation : public LayerTransformation { public: - OPENVINO_RTTI("TransposeTransformation", "0"); + OPENVINO_RTTI("TransposeTransformation", "0", LayerTransformation); TransposeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp b/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp index a210ade145088c..b9dff50ec2b894 100644 --- a/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp +++ b/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp @@ -21,7 +21,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API 
UnsqueezeTransformation : public LayerTransformation { public: - OPENVINO_RTTI("UnsqueezeTransformation", "0"); + OPENVINO_RTTI("UnsqueezeTransformation", "0", LayerTransformation); UnsqueezeTransformation(const Params& params = Params()); bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; diff --git a/src/common/low_precision_transformations/include/low_precision/variadic_split.hpp b/src/common/low_precision_transformations/include/low_precision/variadic_split.hpp index 3584866b5cc0ac..9c2c2d3eec25c9 100644 --- a/src/common/low_precision_transformations/include/low_precision/variadic_split.hpp +++ b/src/common/low_precision_transformations/include/low_precision/variadic_split.hpp @@ -23,7 +23,7 @@ namespace low_precision { */ class LP_TRANSFORMATIONS_API VariadicSplitTransformation : public SplitTransformation { public: - OPENVINO_RTTI("VariadicSplitTransformation", "0"); + OPENVINO_RTTI("VariadicSplitTransformation", "0", SplitTransformation); VariadicSplitTransformation(const Params& params = Params()); }; } // namespace low_precision diff --git a/src/common/snippets/include/snippets/lowered/pass/allocate_buffers.hpp b/src/common/snippets/include/snippets/lowered/pass/allocate_buffers.hpp index cd28f6d2d17084..00ccd14925969d 100644 --- a/src/common/snippets/include/snippets/lowered/pass/allocate_buffers.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/allocate_buffers.hpp @@ -25,7 +25,7 @@ namespace pass { */ class AllocateBuffers: public RangedPass { public: - OPENVINO_RTTI("AllocateBuffers", "RangedPass") + OPENVINO_RTTI("AllocateBuffers", "", RangedPass); AllocateBuffers(bool is_optimized = true); /** diff --git a/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp b/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp index 147494a56c9904..986a099246804d 100644 --- 
a/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp @@ -20,7 +20,7 @@ namespace pass { */ class AssignRegisters : public Pass { public: - OPENVINO_RTTI("AssignRegisters", "Pass") + OPENVINO_RTTI("AssignRegisters", "", Pass); explicit AssignRegisters(const std::function& out)>& mapper, const size_t reg_cnt) : m_reg_type_mapper(mapper), reg_count(reg_cnt) {} bool run(LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/brgemm_blocking.hpp b/src/common/snippets/include/snippets/lowered/pass/brgemm_blocking.hpp index 9a6a7b2cefc92c..3b59f83003d565 100644 --- a/src/common/snippets/include/snippets/lowered/pass/brgemm_blocking.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/brgemm_blocking.hpp @@ -94,7 +94,7 @@ template ::value, bool>::type = true> class BrgemmBlocking : public snippets::lowered::pass::RangedPass, public BrgemmBlockingBase { public: - OPENVINO_RTTI("BrgemmBlocking", "RangedPass") + OPENVINO_RTTI("BrgemmBlocking", "", RangedPass); bool run(snippets::lowered::LinearIR& linear_ir, snippets::lowered::LinearIR::constExprIt begin, diff --git a/src/common/snippets/include/snippets/lowered/pass/clean_repeated_ptr_shifts.hpp b/src/common/snippets/include/snippets/lowered/pass/clean_repeated_ptr_shifts.hpp index 0a724b4f2670e0..10e0635be553d4 100644 --- a/src/common/snippets/include/snippets/lowered/pass/clean_repeated_ptr_shifts.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/clean_repeated_ptr_shifts.hpp @@ -23,7 +23,7 @@ namespace pass { */ class CleanRepeatedDataPointerShifts: public RangedPass { public: - OPENVINO_RTTI("CleanRepeatedDataPointerShifts", "RangedPass") + OPENVINO_RTTI("CleanRepeatedDataPointerShifts", "", RangedPass); CleanRepeatedDataPointerShifts() = default; bool run(lowered::LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; 
diff --git a/src/common/snippets/include/snippets/lowered/pass/cleanup_loop_offsets.hpp b/src/common/snippets/include/snippets/lowered/pass/cleanup_loop_offsets.hpp index cf72577ea98859..5ee4efb2909968 100644 --- a/src/common/snippets/include/snippets/lowered/pass/cleanup_loop_offsets.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/cleanup_loop_offsets.hpp @@ -19,7 +19,7 @@ namespace pass { */ class CleanupLoopOffsets : public RangedPass { public: - OPENVINO_RTTI("CleanupLoopOffsets", "RangedPass") + OPENVINO_RTTI("CleanupLoopOffsets", "", RangedPass); bool run(lowered::LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/compute_buffer_allocation_size.hpp b/src/common/snippets/include/snippets/lowered/pass/compute_buffer_allocation_size.hpp index 01d8b3ee85261e..1f2e672ca104d3 100644 --- a/src/common/snippets/include/snippets/lowered/pass/compute_buffer_allocation_size.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/compute_buffer_allocation_size.hpp @@ -21,7 +21,7 @@ namespace pass { */ class ComputeBufferAllocationSize : public RangedPass { public: - OPENVINO_RTTI("ComputeBufferAllocationSize", "RangedPass") + OPENVINO_RTTI("ComputeBufferAllocationSize", "", RangedPass); ComputeBufferAllocationSize() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/define_buffer_clusters.hpp b/src/common/snippets/include/snippets/lowered/pass/define_buffer_clusters.hpp index 312abb02abf7b5..aefb6ed39ac22b 100644 --- a/src/common/snippets/include/snippets/lowered/pass/define_buffer_clusters.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/define_buffer_clusters.hpp @@ -33,7 +33,7 @@ namespace pass { */ class DefineBufferClusters : public RangedPass { public: - 
OPENVINO_RTTI("DefineBufferClusters", "RangedPass") + OPENVINO_RTTI("DefineBufferClusters", "", RangedPass); DefineBufferClusters() = default; diff --git a/src/common/snippets/include/snippets/lowered/pass/extract_loop_invariants.hpp b/src/common/snippets/include/snippets/lowered/pass/extract_loop_invariants.hpp index d4fb19f8d0c6a4..99432e7ac88909 100644 --- a/src/common/snippets/include/snippets/lowered/pass/extract_loop_invariants.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/extract_loop_invariants.hpp @@ -20,7 +20,7 @@ namespace pass { */ class ExtractLoopInvariants : public RangedPass { public: - OPENVINO_RTTI("ExtractLoopInvariants", "RangedPass") + OPENVINO_RTTI("ExtractLoopInvariants", "", RangedPass); ExtractLoopInvariants() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/fuse_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/fuse_loops.hpp index fda2f914f157a7..53b0585b0bea87 100644 --- a/src/common/snippets/include/snippets/lowered/pass/fuse_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/fuse_loops.hpp @@ -38,7 +38,7 @@ namespace pass { */ class FuseLoops : public RangedPass { public: - OPENVINO_RTTI("FuseLoops", "RangedPass") + OPENVINO_RTTI("FuseLoops", "", RangedPass); FuseLoops(); bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/init_buffers_default.hpp b/src/common/snippets/include/snippets/lowered/pass/init_buffers_default.hpp index 5ddb2749d63998..6c48e1fb0229a1 100644 --- a/src/common/snippets/include/snippets/lowered/pass/init_buffers_default.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/init_buffers_default.hpp @@ -19,7 +19,7 @@ namespace pass { class InitBuffersDefault : public RangedPass { public: - 
OPENVINO_RTTI("InitBuffersDefault", "RangedPass") + OPENVINO_RTTI("InitBuffersDefault", "", RangedPass); InitBuffersDefault(size_t& buffer_scratchpad_size) : m_buffer_scratchpad_size(buffer_scratchpad_size) { m_buffer_scratchpad_size = 0; diff --git a/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp index e94e1977974716..ce010716fbed09 100644 --- a/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp @@ -20,7 +20,7 @@ namespace pass { */ class InitLoops : public Pass { public: - OPENVINO_RTTI("InitLoops", "Pass") + OPENVINO_RTTI("InitLoops", "", Pass); InitLoops() = default; bool run(LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_broadcastmove.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_broadcastmove.hpp index bd4ca6e9fd4394..876e0f84df01db 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_broadcastmove.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_broadcastmove.hpp @@ -18,7 +18,7 @@ namespace pass { */ class InsertBroadcastMove : public RangedPass { public: - OPENVINO_RTTI("InsertBroadcastMove", "RangedPass") + OPENVINO_RTTI("InsertBroadcastMove", "", RangedPass); bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; static bool is_broadcasting_supported(const std::shared_ptr& n); diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_buffers.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_buffers.hpp index 32b5e241ba4cf8..c0e15ec94c1d29 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_buffers.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_buffers.hpp @@ -23,7 +23,7 @@ namespace pass { */ class InsertBuffers : public RangedPass { public: - 
OPENVINO_RTTI("InsertBuffers", "RangedPass") + OPENVINO_RTTI("InsertBuffers", "", RangedPass); InsertBuffers() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_load_store.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_load_store.hpp index e404de0f7d81fe..8aaf2eaa4cce62 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_load_store.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_load_store.hpp @@ -22,7 +22,7 @@ namespace pass { */ class InsertLoadStore : public RangedPass { public: - OPENVINO_RTTI("InsertLoadStore", "RangedPass") + OPENVINO_RTTI("InsertLoadStore", "", RangedPass); explicit InsertLoadStore(size_t vector_size); bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_loops.hpp index 1c86ccbbc835a3..62a8267fea2c58 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_loops.hpp @@ -21,7 +21,7 @@ namespace pass { */ class InsertLoops : public RangedPass { public: - OPENVINO_RTTI("InsertLoops", "RangedPass") + OPENVINO_RTTI("InsertLoops", "", RangedPass); InsertLoops() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; private: diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_perf_count.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_perf_count.hpp index 6d20db94037887..8a44df489a276a 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_perf_count.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_perf_count.hpp @@ -23,7 +23,7 @@ namespace 
pass { */ class InsertPerfCount: public RangedPass { public: - OPENVINO_RTTI("InsertPerfCount", "RangedPass") + OPENVINO_RTTI("InsertPerfCount", "", RangedPass); InsertPerfCount(std::map boundary_op_names); bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_specific_iterations.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_specific_iterations.hpp index 34c0ddc87e2b53..44acda6d8b08a8 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_specific_iterations.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_specific_iterations.hpp @@ -24,7 +24,7 @@ namespace pass { */ class InsertSpecificIterations : public RangedPass { public: - OPENVINO_RTTI("InsertSpecificIterations", "RangedPass") + OPENVINO_RTTI("InsertSpecificIterations", "", RangedPass); InsertSpecificIterations() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/iter_handler.hpp b/src/common/snippets/include/snippets/lowered/pass/iter_handler.hpp index 2587ffbd546dfa..6fc2763f7db58d 100644 --- a/src/common/snippets/include/snippets/lowered/pass/iter_handler.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/iter_handler.hpp @@ -22,7 +22,7 @@ namespace pass { class UpdateMemoryAccessCounts : public pass::RangedPass { public: UpdateMemoryAccessCounts(size_t count); - OPENVINO_RTTI("UpdateMemoryAccessCounts", "RangedPass") + OPENVINO_RTTI("UpdateMemoryAccessCounts", "", RangedPass); bool run(LinearIR& linear_ir, LinearIR::constExprIt begin, LinearIR::constExprIt end) override; std::shared_ptr merge(const std::shared_ptr& other) override; @@ -39,7 +39,7 @@ class UpdateMemoryAccessCounts : public pass::RangedPass { class SetFillOffset : public pass::RangedPass { public: SetFillOffset(size_t 
offset); - OPENVINO_RTTI("SetFillOffset", "RangedPass") + OPENVINO_RTTI("SetFillOffset", "", RangedPass); bool run(LinearIR& linear_ir, LinearIR::constExprIt begin, LinearIR::constExprIt end) override; std::shared_ptr merge(const std::shared_ptr& other) override; @@ -55,7 +55,7 @@ class SetFillOffset : public pass::RangedPass { class SetLoopIncrementOne : public snippets::lowered::pass::RangedPass { public: SetLoopIncrementOne() = default; - OPENVINO_RTTI("SetLoopIncrementOne", "RangedPass") + OPENVINO_RTTI("SetLoopIncrementOne", "", RangedPass); bool run(snippets::lowered::LinearIR& linear_ir, snippets::lowered::LinearIR::constExprIt begin, snippets::lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/load_movebroadcast_to_broadcastload.hpp b/src/common/snippets/include/snippets/lowered/pass/load_movebroadcast_to_broadcastload.hpp index f7585d586412ec..84dac29e135529 100644 --- a/src/common/snippets/include/snippets/lowered/pass/load_movebroadcast_to_broadcastload.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/load_movebroadcast_to_broadcastload.hpp @@ -19,7 +19,7 @@ namespace pass { class LoadMoveBroadcastToBroadcastLoad: public RangedPass { public: LoadMoveBroadcastToBroadcastLoad() = default; - OPENVINO_RTTI("LoadMoveBroadcastToBroadcastLoad", "RangedPass") + OPENVINO_RTTI("LoadMoveBroadcastToBroadcastLoad", "", RangedPass); bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/mark_invariant_shape_path.hpp b/src/common/snippets/include/snippets/lowered/pass/mark_invariant_shape_path.hpp index 6a31a697baca77..ba036586f573a0 100644 --- a/src/common/snippets/include/snippets/lowered/pass/mark_invariant_shape_path.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/mark_invariant_shape_path.hpp @@ -22,7 +22,7 @@ namespace pass { */ class MarkInvariantShapePath: 
public RangedPass { public: - OPENVINO_RTTI("MarkInvariantShapePath", "RangedPass") + OPENVINO_RTTI("MarkInvariantShapePath", "", RangedPass); MarkInvariantShapePath() = default; /** diff --git a/src/common/snippets/include/snippets/lowered/pass/mark_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/mark_loops.hpp index f3c1cd4c8f9818..20056eae3da54d 100644 --- a/src/common/snippets/include/snippets/lowered/pass/mark_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/mark_loops.hpp @@ -22,7 +22,7 @@ namespace pass { */ class MarkLoops : public RangedPass { public: - OPENVINO_RTTI("MarkLoops", "RangedPass") + OPENVINO_RTTI("MarkLoops", "", RangedPass); MarkLoops(size_t vector_size); bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/move_result_out_of_loop.hpp b/src/common/snippets/include/snippets/lowered/pass/move_result_out_of_loop.hpp index c0428a60fe8fea..9cf07418a74110 100644 --- a/src/common/snippets/include/snippets/lowered/pass/move_result_out_of_loop.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/move_result_out_of_loop.hpp @@ -21,7 +21,7 @@ namespace pass { */ class MoveResultOutOfLoop : public Pass { public: - OPENVINO_RTTI("MoveResultOutOfLoop", "Pass") + OPENVINO_RTTI("MoveResultOutOfLoop", "", Pass); MoveResultOutOfLoop() = default; bool run(LinearIR& linear_ir) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/move_scalar_to_consumer.hpp b/src/common/snippets/include/snippets/lowered/pass/move_scalar_to_consumer.hpp index ba2cfcbb755e9b..121b830272152a 100644 --- a/src/common/snippets/include/snippets/lowered/pass/move_scalar_to_consumer.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/move_scalar_to_consumer.hpp @@ -24,7 +24,7 @@ namespace pass { */ class MoveScalarToConsumer : public Pass { public: - OPENVINO_RTTI("MoveScalarsToConsumer", "Pass") + 
OPENVINO_RTTI("MoveScalarsToConsumer", "", Pass); MoveScalarToConsumer() = default; bool run(LinearIR& linear_ir) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/normalize_buffer_reg_groups.hpp b/src/common/snippets/include/snippets/lowered/pass/normalize_buffer_reg_groups.hpp index e07d11da70d904..b98d7d46bfbbee 100644 --- a/src/common/snippets/include/snippets/lowered/pass/normalize_buffer_reg_groups.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/normalize_buffer_reg_groups.hpp @@ -25,7 +25,7 @@ namespace pass { class NormalizeBufferRegisterGroups : public RangedPass { public: - OPENVINO_RTTI("NormalizeBufferRegisterGroups", "RangedPass") + OPENVINO_RTTI("NormalizeBufferRegisterGroups", "", RangedPass); /** * @brief Apply the pass to the Linear IR * @param linear_ir the target Linear IR diff --git a/src/common/snippets/include/snippets/lowered/pass/normalize_loop_ids.hpp b/src/common/snippets/include/snippets/lowered/pass/normalize_loop_ids.hpp index ba7c673b129905..ca672392602894 100644 --- a/src/common/snippets/include/snippets/lowered/pass/normalize_loop_ids.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/normalize_loop_ids.hpp @@ -29,7 +29,7 @@ namespace pass { class NormalizeLoopIDs : public Pass { public: - OPENVINO_RTTI("NormalizeLoopIDs", "Pass") + OPENVINO_RTTI("NormalizeLoopIDs", "", Pass); NormalizeLoopIDs(bool has_specific_loops = true) : m_has_specific_loops(has_specific_loops) {} bool run(lowered::LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp b/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp index 4ae68fc38cf37e..607006d1a836bf 100644 --- a/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp @@ -46,7 +46,7 @@ namespace pass { class OptimizeDomain : public snippets::lowered::pass::Pass { public: - 
OPENVINO_RTTI("OptimizeDomain", "Pass") + OPENVINO_RTTI("OptimizeDomain", "", snippets::lowered::pass::Pass) explicit OptimizeDomain(size_t& tile_rank); bool run(LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/optimize_loop_single_evaluation.hpp b/src/common/snippets/include/snippets/lowered/pass/optimize_loop_single_evaluation.hpp index b320bd8396e866..79d1fe6e489598 100644 --- a/src/common/snippets/include/snippets/lowered/pass/optimize_loop_single_evaluation.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/optimize_loop_single_evaluation.hpp @@ -20,7 +20,7 @@ namespace pass { */ class OptimizeLoopSingleEvaluation : public RangedPass { public: - OPENVINO_RTTI("OptimizeLoopSingleEvaluation", "RangedPass") + OPENVINO_RTTI("OptimizeLoopSingleEvaluation", "", RangedPass); bool run(lowered::LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/pass.hpp b/src/common/snippets/include/snippets/lowered/pass/pass.hpp index 2758ab85070341..bf08f653e83277 100644 --- a/src/common/snippets/include/snippets/lowered/pass/pass.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/pass.hpp @@ -27,7 +27,7 @@ class PassBase : public std::enable_shared_from_this { // Note that get_type_info_static and get_type_info are needed to mimic OPENVINO_RTTI interface, // so the standard OPENVINO_RTTI(...) macros could be used in derived classes. 
_OPENVINO_HIDDEN_METHOD static const ::ov::DiscreteTypeInfo& get_type_info_static() { - static ::ov::DiscreteTypeInfo type_info_static {"PassBase"}; + static ::ov::DiscreteTypeInfo type_info_static {"snippets::lowered::pass::PassBase"}; type_info_static.hash(); return type_info_static; } @@ -59,6 +59,7 @@ class PassBase : public std::enable_shared_from_this { */ class Pass : public PassBase { public: + OPENVINO_RTTI("snippets::lowered::pass::Pass", "", PassBase) /** * @brief Apply the pass to the Linear IR * @param linear_ir the target Linear IR @@ -74,6 +75,7 @@ class Pass : public PassBase { */ class ConstPass : public PassBase { public: + OPENVINO_RTTI("snippets::lowered::pass::ConstPass", "", PassBase) /** * @brief Apply the pass to the Linear IR * @param linear_ir the target Linear IR @@ -89,6 +91,7 @@ class ConstPass : public PassBase { */ class RangedPass : public PassBase { public: + OPENVINO_RTTI("snippets::lowered::pass::RangedPass", "", PassBase) /** * @brief Apply the pass to the Linear IR * @param linear_ir the target Linear IR diff --git a/src/common/snippets/include/snippets/lowered/pass/propagate_buffer_offset.hpp b/src/common/snippets/include/snippets/lowered/pass/propagate_buffer_offset.hpp index d895b3a60cd26d..168877260c1b0b 100644 --- a/src/common/snippets/include/snippets/lowered/pass/propagate_buffer_offset.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/propagate_buffer_offset.hpp @@ -19,7 +19,7 @@ namespace pass { */ class PropagateBufferOffset: public Pass { public: - OPENVINO_RTTI("PropagateBufferOffset", "Pass") + OPENVINO_RTTI("PropagateBufferOffset", "", Pass); PropagateBufferOffset() = default; /** diff --git a/src/common/snippets/include/snippets/lowered/pass/propagate_subtensors.hpp b/src/common/snippets/include/snippets/lowered/pass/propagate_subtensors.hpp index b48efb89051b34..5f8c39dd11e31a 100644 --- a/src/common/snippets/include/snippets/lowered/pass/propagate_subtensors.hpp +++ 
b/src/common/snippets/include/snippets/lowered/pass/propagate_subtensors.hpp @@ -22,7 +22,7 @@ namespace pass { class UpdateSubtensors : public pass::RangedPass { public: UpdateSubtensors(size_t tail_size); - OPENVINO_RTTI("UpdateSubtensors", "RangedPass") + OPENVINO_RTTI("UpdateSubtensors", "", RangedPass); bool run(LinearIR& linear_ir, LinearIR::constExprIt begin, LinearIR::constExprIt end) override; std::shared_ptr merge(const std::shared_ptr& other) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/reduce_decomposition.hpp b/src/common/snippets/include/snippets/lowered/pass/reduce_decomposition.hpp index aaf374c3108f13..c66f030e96a90d 100644 --- a/src/common/snippets/include/snippets/lowered/pass/reduce_decomposition.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/reduce_decomposition.hpp @@ -19,7 +19,7 @@ namespace pass { */ class ReduceDecomposition : public RangedPass { public: - OPENVINO_RTTI("ReduceDecomposition", "RangedPass") + OPENVINO_RTTI("ReduceDecomposition", "", RangedPass); explicit ReduceDecomposition(size_t vector_size); bool run(LinearIR& linear_ir, LinearIR::constExprIt begin, LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/serialize_base.hpp b/src/common/snippets/include/snippets/lowered/pass/serialize_base.hpp index 560744f4eb09d8..2207592fbb22f5 100644 --- a/src/common/snippets/include/snippets/lowered/pass/serialize_base.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/serialize_base.hpp @@ -18,7 +18,7 @@ namespace pass { */ class SerializeBase : public ConstPass { public: - OPENVINO_RTTI("SerializeBase", "ConstPass") + OPENVINO_RTTI("SerializeBase", "", ConstPass) SerializeBase(const std::string& xml_path); protected: diff --git a/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp b/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp index dc22ce4beff1a0..bdcdcfe7165f85 100644 --- 
a/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp @@ -31,7 +31,7 @@ namespace pass { */ class SetBufferRegGroup: public RangedPass { public: - OPENVINO_RTTI("SetBufferRegGroup", "RangedPass") + OPENVINO_RTTI("SetBufferRegGroup", "", RangedPass); SetBufferRegGroup() = default; /** diff --git a/src/common/snippets/include/snippets/lowered/pass/set_load_store_scalar.hpp b/src/common/snippets/include/snippets/lowered/pass/set_load_store_scalar.hpp index bfc254b3a11df1..9a5ecb4c82fdcd 100644 --- a/src/common/snippets/include/snippets/lowered/pass/set_load_store_scalar.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/set_load_store_scalar.hpp @@ -20,7 +20,7 @@ namespace pass { */ class SetLoadStoreScalar : public RangedPass { public: - OPENVINO_RTTI("SetLoadStoreScalar", "RangedPass") + OPENVINO_RTTI("SetLoadStoreScalar", "", RangedPass); SetLoadStoreScalar() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/solve_buffer_memory.hpp b/src/common/snippets/include/snippets/lowered/pass/solve_buffer_memory.hpp index 4d3c9f95350f4b..39614e71ae2052 100644 --- a/src/common/snippets/include/snippets/lowered/pass/solve_buffer_memory.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/solve_buffer_memory.hpp @@ -24,7 +24,7 @@ namespace pass { */ class SolveBufferMemory : public Pass { public: - OPENVINO_RTTI("SolveBufferMemory", "Pass") + OPENVINO_RTTI("SolveBufferMemory", "", Pass); SolveBufferMemory(size_t& static_buffer_scratchpad_size) : m_static_buffer_scratchpad_size(static_buffer_scratchpad_size) {} /** diff --git a/src/common/snippets/include/snippets/lowered/pass/split_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/split_loops.hpp index 953f20bbe56c3d..9a1843e34b134e 100644 --- 
a/src/common/snippets/include/snippets/lowered/pass/split_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/split_loops.hpp @@ -31,7 +31,7 @@ namespace pass { class SplitLoops : public RangedPass { public: - OPENVINO_RTTI("SplitLoops", "RangedPass") + OPENVINO_RTTI("SplitLoops", "", RangedPass); SplitLoops() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; @@ -49,7 +49,7 @@ class SplitLoops : public RangedPass { class TransformInnerSplitLoop : public pass::RangedPass { public: TransformInnerSplitLoop() = default; - OPENVINO_RTTI("TransformInnerSplitLoop", "RangedPass") + OPENVINO_RTTI("TransformInnerSplitLoop", "", RangedPass); bool run(LinearIR& linear_ir, LinearIR::constExprIt begin, LinearIR::constExprIt end) override; std::shared_ptr merge(const std::shared_ptr& other) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/validate.hpp b/src/common/snippets/include/snippets/lowered/pass/validate.hpp index 406aa67bc62f00..c40ace1a60c73d 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate.hpp @@ -18,7 +18,7 @@ namespace pass { */ class Validate : public RangedPass { public: - OPENVINO_RTTI("Validate", "Pass") + OPENVINO_RTTI("Validate", "", RangedPass) Validate(); bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/validate_buffers.hpp b/src/common/snippets/include/snippets/lowered/pass/validate_buffers.hpp index b87697d054e4fb..57ebe11b219083 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate_buffers.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate_buffers.hpp @@ -18,7 +18,7 @@ namespace pass { */ class ValidateBuffers : public RangedPass { public: - OPENVINO_RTTI("ValidateBuffers", "Pass") + 
OPENVINO_RTTI("ValidateBuffers", "", RangedPass) ValidateBuffers() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/validate_expanded_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/validate_expanded_loops.hpp index 5774e6e1a03913..8e97bf9b83d0ef 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate_expanded_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate_expanded_loops.hpp @@ -18,7 +18,7 @@ namespace pass { */ class ValidateExpandedLoops : public Pass { public: - OPENVINO_RTTI("ValidateExpandedLoops", "Pass") + OPENVINO_RTTI("ValidateExpandedLoops", "", Pass); ValidateExpandedLoops() = default; bool run(LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/validate_shapes.hpp b/src/common/snippets/include/snippets/lowered/pass/validate_shapes.hpp index 577bc46edd26d0..28e37b99a8646d 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate_shapes.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate_shapes.hpp @@ -18,7 +18,7 @@ namespace pass { */ class ValidateShapes : public RangedPass { public: - OPENVINO_RTTI("ValidateShapes", "RangedPass") + OPENVINO_RTTI("ValidateShapes", "", RangedPass); ValidateShapes() = default; bool run(lowered::LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp index d78aaaa668363e..89380416298471 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp @@ -27,7 +27,7 @@ namespace pass { */ class ValidateUnifiedLoops : public Pass { 
public: - OPENVINO_RTTI("ValidateUnifiedLoops", "Pass") + OPENVINO_RTTI("ValidateUnifiedLoops", "", Pass); ValidateUnifiedLoops() = default; bool run(LinearIR& linear_ir) override; diff --git a/src/common/transformations/include/transformations/common_optimizations/fold_subgraph_empty_inputs.hpp b/src/common/transformations/include/transformations/common_optimizations/fold_subgraph_empty_inputs.hpp index 89024746ee7181..37a67fa2d20594 100644 --- a/src/common/transformations/include/transformations/common_optimizations/fold_subgraph_empty_inputs.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/fold_subgraph_empty_inputs.hpp @@ -40,7 +40,7 @@ class ov::pass::FoldSubgraphEmptyInputs : public ov::pass::MatcherPass { class ov::pass::DisableFoldSubgraphEmptyInputs : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("DisableFoldSubgraphEmptyInputs"); + OPENVINO_RTTI("DisableFoldSubgraphEmptyInputs", "0", ov::RuntimeAttribute); DisableFoldSubgraphEmptyInputs() = default; bool is_copyable() const override { return false; diff --git a/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp b/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp index 3e20e7535f8fed..a7317022790e65 100644 --- a/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp @@ -41,7 +41,7 @@ class ov::pass::MarkPrecisionSensitiveShapeOfSubgraphs : public ModelPass { */ class ov::pass::MarkShapeOfSubgraphs : public MarkPrecisionSensitiveShapeOfSubgraphs { public: - OPENVINO_RTTI("MarkShapeOfSubgraphs", "0"); + OPENVINO_RTTI("MarkShapeOfSubgraphs", "0", MarkPrecisionSensitiveShapeOfSubgraphs); MarkShapeOfSubgraphs(); }; @@ -53,7 +53,7 @@ class 
ov::pass::MarkShapeOfSubgraphs : public MarkPrecisionSensitiveShapeOfSubgr */ class ov::pass::MarkPrecisionSensitiveConstants : public MarkPrecisionSensitiveShapeOfSubgraphs { public: - OPENVINO_RTTI("MarkPrecisionSensitiveConstants", "0"); + OPENVINO_RTTI("MarkPrecisionSensitiveConstants", "0", MarkPrecisionSensitiveShapeOfSubgraphs); MarkPrecisionSensitiveConstants(); }; @@ -65,6 +65,6 @@ class ov::pass::MarkPrecisionSensitiveConstants : public MarkPrecisionSensitiveS */ class ov::pass::MarkDividesInShapeSubgraphs : public MarkPrecisionSensitiveShapeOfSubgraphs { public: - OPENVINO_RTTI("MarkDividesInShapeSubgraphs", "0"); + OPENVINO_RTTI("MarkDividesInShapeSubgraphs", "0", MarkPrecisionSensitiveShapeOfSubgraphs); MarkDividesInShapeSubgraphs(); }; diff --git a/src/common/transformations/include/transformations/common_optimizations/remove_concat_zero_dim_input.hpp b/src/common/transformations/include/transformations/common_optimizations/remove_concat_zero_dim_input.hpp index 881fd9cb23e9c3..f4102b8c2eb4ff 100644 --- a/src/common/transformations/include/transformations/common_optimizations/remove_concat_zero_dim_input.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/remove_concat_zero_dim_input.hpp @@ -36,7 +36,7 @@ OPENVINO_API bool remove_concat_zerodim_input_is_disabled(const std::shared_ptr< class DisableRemoveConcatZeroDimInput : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("DisableRemoveConcatZeroDimInput"); + OPENVINO_RTTI("DisableRemoveConcatZeroDimInput", "0", ov::RuntimeAttribute); DisableRemoveConcatZeroDimInput() = default; bool is_copyable() const override { return false; diff --git a/src/common/transformations/include/transformations/common_optimizations/strides_optimization.hpp b/src/common/transformations/include/transformations/common_optimizations/strides_optimization.hpp index acdd30580b1a23..c16c5186afe213 100644 --- 
a/src/common/transformations/include/transformations/common_optimizations/strides_optimization.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/strides_optimization.hpp @@ -62,6 +62,6 @@ class ov::pass::UnsupportedNodesStridesPropagation : public ov::pass::MatcherPas */ class ov::pass::StridesOptimization : public ov::pass::BackwardGraphRewrite { public: - OPENVINO_RTTI("StridesOptimization", "0"); + OPENVINO_RTTI("StridesOptimization", "0", ov::pass::BackwardGraphRewrite); StridesOptimization(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp index 32a2f7a3ace512..dc4fff45742b75 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp @@ -43,19 +43,19 @@ class ConvertReduceBase : public ov::pass::MatcherPass { class ov::pass::ConvertReduceMeanToPooling : public ConvertReduceBase { public: - OPENVINO_RTTI("ConvertReduceMeanToPooling", "0"); + OPENVINO_RTTI("ConvertReduceMeanToPooling", "0", ConvertReduceBase); ConvertReduceMeanToPooling(); }; class ov::pass::ConvertReduceMaxToPooling : public ConvertReduceBase { public: - OPENVINO_RTTI("ConvertReduceMaxToPooling", "0"); + OPENVINO_RTTI("ConvertReduceMaxToPooling", "0", ConvertReduceBase); ConvertReduceMaxToPooling(); }; class ov::pass::ConvertReduceSumToPooling : public ConvertReduceBase { public: - OPENVINO_RTTI("ConvertReduceSumToPooling", "0"); + OPENVINO_RTTI("ConvertReduceSumToPooling", "0", ConvertReduceBase); ConvertReduceSumToPooling(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp index 
9eeb3e5c0f8da6..83fb0ebcf9e3bc 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp @@ -42,43 +42,43 @@ class CvtReduceBase : public ov::pass::MatcherPass { class ov::pass::ConvertReduceMeanToReshape : public CvtReduceBase { public: - OPENVINO_RTTI("ConvertReduceMeanToReshape", "0"); + OPENVINO_RTTI("ConvertReduceMeanToReshape", "0", CvtReduceBase); ConvertReduceMeanToReshape(); }; class ov::pass::ConvertReduceSumToReshape : public CvtReduceBase { public: - OPENVINO_RTTI("ConvertReduceSumToReshape", "0"); + OPENVINO_RTTI("ConvertReduceSumToReshape", "0", CvtReduceBase); ConvertReduceSumToReshape(); }; class ov::pass::ConvertReduceProdToReshape : public CvtReduceBase { public: - OPENVINO_RTTI("ConvertReduceProdToReshape", "0"); + OPENVINO_RTTI("ConvertReduceProdToReshape", "0", CvtReduceBase); ConvertReduceProdToReshape(); }; class ov::pass::ConvertReduceMaxToReshape : public CvtReduceBase { public: - OPENVINO_RTTI("ConvertReduceMaxToReshape", "0"); + OPENVINO_RTTI("ConvertReduceMaxToReshape", "0", CvtReduceBase); ConvertReduceMaxToReshape(); }; class ov::pass::ConvertReduceMinToReshape : public CvtReduceBase { public: - OPENVINO_RTTI("ConvertReduceMinToReshape", "0"); + OPENVINO_RTTI("ConvertReduceMinToReshape", "0", CvtReduceBase); ConvertReduceMinToReshape(); }; class ov::pass::ConvertReduceLogicalAndToReshape : public CvtReduceBase { public: - OPENVINO_RTTI("ConvertReduceLogicalAndToReshape", "0"); + OPENVINO_RTTI("ConvertReduceLogicalAndToReshape", "0", CvtReduceBase); ConvertReduceLogicalAndToReshape(); }; class ov::pass::ConvertReduceLogicalOrToReshape : public CvtReduceBase { public: - OPENVINO_RTTI("ConvertReduceLogicalOrToReshape", "0"); + OPENVINO_RTTI("ConvertReduceLogicalOrToReshape", "0", CvtReduceBase); ConvertReduceLogicalOrToReshape(); }; diff --git 
a/src/common/transformations/include/transformations/rt_info/decompression.hpp b/src/common/transformations/include/transformations/rt_info/decompression.hpp index 84f474c247a35f..c86f824e45db69 100644 --- a/src/common/transformations/include/transformations/rt_info/decompression.hpp +++ b/src/common/transformations/include/transformations/rt_info/decompression.hpp @@ -30,7 +30,7 @@ TRANSFORMATIONS_API bool is_decompression(const std::shared_ptr& node); */ class TRANSFORMATIONS_API Decompression : public RuntimeAttribute { public: - OPENVINO_RTTI("decompression", "0"); + OPENVINO_RTTI("decompression", "0", RuntimeAttribute); Decompression() = default; diff --git a/src/common/transformations/include/transformations/rt_info/dequantization_node.hpp b/src/common/transformations/include/transformations/rt_info/dequantization_node.hpp index d9cf3589391b6d..cc1015c83dbc46 100644 --- a/src/common/transformations/include/transformations/rt_info/dequantization_node.hpp +++ b/src/common/transformations/include/transformations/rt_info/dequantization_node.hpp @@ -21,7 +21,7 @@ TRANSFORMATIONS_API bool is_dequantization_node(const std::shared_ptr& nod */ class TRANSFORMATIONS_API DequantizationNode : public RuntimeAttribute { public: - OPENVINO_RTTI("dequantization_node", "0"); + OPENVINO_RTTI("dequantization_node", "0", RuntimeAttribute); bool is_copyable() const override { return false; diff --git a/src/common/transformations/include/transformations/rt_info/disable_fp16_compression.hpp b/src/common/transformations/include/transformations/rt_info/disable_fp16_compression.hpp index 462b9c54a57138..8a76157d00ce8b 100644 --- a/src/common/transformations/include/transformations/rt_info/disable_fp16_compression.hpp +++ b/src/common/transformations/include/transformations/rt_info/disable_fp16_compression.hpp @@ -30,7 +30,7 @@ TRANSFORMATIONS_API void do_not_postpone_fp16_compression(RTMap& rt_info); */ class TRANSFORMATIONS_API DisableFP16Compression : public RuntimeAttribute { 
public: - OPENVINO_RTTI("precise", "0"); + OPENVINO_RTTI("precise", "0", RuntimeAttribute); DisableFP16Compression() = default; diff --git a/src/common/transformations/include/transformations/rt_info/fused_names_attribute.hpp b/src/common/transformations/include/transformations/rt_info/fused_names_attribute.hpp index 9835bb1d25193d..76e010bfcd6c57 100644 --- a/src/common/transformations/include/transformations/rt_info/fused_names_attribute.hpp +++ b/src/common/transformations/include/transformations/rt_info/fused_names_attribute.hpp @@ -28,10 +28,8 @@ namespace ov { * all operation names that was fully or partially fused into node */ class TRANSFORMATIONS_API FusedNames : public ov::RuntimeAttribute { - std::set fused_names; - public: - OPENVINO_RTTI("fused_names", "0"); + OPENVINO_RTTI("fused_names", "0", RuntimeAttribute); /** * A default constructor @@ -70,6 +68,9 @@ class TRANSFORMATIONS_API FusedNames : public ov::RuntimeAttribute { bool visit_attributes(AttributeVisitor& visitor) override; std::string to_string() const override; + +private: + std::set fused_names; }; /** diff --git a/src/common/transformations/include/transformations/rt_info/is_shape_subgraph.hpp b/src/common/transformations/include/transformations/rt_info/is_shape_subgraph.hpp index 63ebe86d72d61e..1f526b45cc9ff0 100644 --- a/src/common/transformations/include/transformations/rt_info/is_shape_subgraph.hpp +++ b/src/common/transformations/include/transformations/rt_info/is_shape_subgraph.hpp @@ -23,7 +23,7 @@ TRANSFORMATIONS_API bool is_shape_subgraph(const std::shared_ptr& no */ class TRANSFORMATIONS_API ShapeSubgraph : public RuntimeAttribute { public: - OPENVINO_RTTI("shape_subgraph", "0"); + OPENVINO_RTTI("shape_subgraph", "0", RuntimeAttribute); ShapeSubgraph() = default; diff --git a/src/common/transformations/include/transformations/rt_info/keep_const_precision.hpp b/src/common/transformations/include/transformations/rt_info/keep_const_precision.hpp index 46c8bf00deb3f2..1d63e1b9ff0d90 
100644 --- a/src/common/transformations/include/transformations/rt_info/keep_const_precision.hpp +++ b/src/common/transformations/include/transformations/rt_info/keep_const_precision.hpp @@ -23,7 +23,7 @@ TRANSFORMATIONS_API bool is_keep_const_precision(const std::shared_ptr& n */ class TRANSFORMATIONS_API NonconvertibleDivide : public RuntimeAttribute { public: - OPENVINO_RTTI("nonconvertable_divide", "0"); + OPENVINO_RTTI("nonconvertable_divide", "0", RuntimeAttribute); NonconvertibleDivide() = default; diff --git a/src/common/transformations/include/transformations/rt_info/old_api_map_element_type_attribute.hpp b/src/common/transformations/include/transformations/rt_info/old_api_map_element_type_attribute.hpp index 028e8ebf25560d..4fc801640a2456 100644 --- a/src/common/transformations/include/transformations/rt_info/old_api_map_element_type_attribute.hpp +++ b/src/common/transformations/include/transformations/rt_info/old_api_map_element_type_attribute.hpp @@ -29,7 +29,7 @@ namespace ov { */ class TRANSFORMATIONS_API OldApiMapElementType : public RuntimeAttribute { public: - OPENVINO_RTTI("old_api_map_element_type", "0"); + OPENVINO_RTTI("old_api_map_element_type", "0", RuntimeAttribute); /** * A default constructor diff --git a/src/common/transformations/include/transformations/rt_info/old_api_map_order_attribute.hpp b/src/common/transformations/include/transformations/rt_info/old_api_map_order_attribute.hpp index a790dc83fcd409..6f02c5b1534680 100644 --- a/src/common/transformations/include/transformations/rt_info/old_api_map_order_attribute.hpp +++ b/src/common/transformations/include/transformations/rt_info/old_api_map_order_attribute.hpp @@ -40,7 +40,7 @@ class OldApiMapOrder; */ class TRANSFORMATIONS_API OldApiMapOrder : public RuntimeAttribute { public: - OPENVINO_RTTI("old_api_map_order", "0"); + OPENVINO_RTTI("old_api_map_order", "0", RuntimeAttribute); /** * A default constructor diff --git 
a/src/common/transformations/include/transformations/rt_info/original_precision_attribute.hpp b/src/common/transformations/include/transformations/rt_info/original_precision_attribute.hpp index 428db0cf62a889..cb47d5406ea5a1 100644 --- a/src/common/transformations/include/transformations/rt_info/original_precision_attribute.hpp +++ b/src/common/transformations/include/transformations/rt_info/original_precision_attribute.hpp @@ -23,7 +23,7 @@ TRANSFORMATIONS_API element::Type_t get_original_precision(const std::shared_ptr */ class TRANSFORMATIONS_API OriginalPrecisionAttribute : public RuntimeAttribute { public: - OPENVINO_RTTI("original_precision", "0"); + OPENVINO_RTTI("original_precision", "0", RuntimeAttribute); }; } // namespace ov diff --git a/src/common/transformations/include/transformations/rt_info/preprocessing_attribute.hpp b/src/common/transformations/include/transformations/rt_info/preprocessing_attribute.hpp index 0511ec6bd7993a..8e615db018c10d 100644 --- a/src/common/transformations/include/transformations/rt_info/preprocessing_attribute.hpp +++ b/src/common/transformations/include/transformations/rt_info/preprocessing_attribute.hpp @@ -21,7 +21,7 @@ TRANSFORMATIONS_API void set_is_preprocessing_node(std::shared_ptr node); */ class TRANSFORMATIONS_API PreprocessingAttribute : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("preprocessing", "0"); + OPENVINO_RTTI("preprocessing", "0", ov::RuntimeAttribute); PreprocessingAttribute() = default; bool visit_attributes(AttributeVisitor& visitor) override { return true; diff --git a/src/common/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp b/src/common/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp index dc18fb458cd4e5..ee5acde5b9f3ad 100644 --- a/src/common/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp +++ 
b/src/common/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp @@ -31,7 +31,7 @@ TRANSFORMATIONS_API std::string getPrimitivesPriority(const std::shared_ptr& node); class TRANSFORMATIONS_API StridesPropagation : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("strides_propagation", "0"); + OPENVINO_RTTI("strides_propagation", "0", ov::RuntimeAttribute); StridesPropagation() = default; StridesPropagation(const ov::Strides& value) : value{value} {} diff --git a/src/common/transformations/include/transformations/rt_info/transpose_sinking_attr.hpp b/src/common/transformations/include/transformations/rt_info/transpose_sinking_attr.hpp index 5e0718b906c32f..5ff0051e6e1882 100644 --- a/src/common/transformations/include/transformations/rt_info/transpose_sinking_attr.hpp +++ b/src/common/transformations/include/transformations/rt_info/transpose_sinking_attr.hpp @@ -24,7 +24,7 @@ TRANSFORMATIONS_API bool is_sinking_node(ov::Output output); */ class TRANSFORMATIONS_API NoTransposeSinkingAttr : public RuntimeAttribute { public: - OPENVINO_RTTI("no_transpose_sinking", "0"); + OPENVINO_RTTI("no_transpose_sinking", "0", RuntimeAttribute); bool is_copyable() const override { return false; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_binary.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_binary.hpp index b8eca55dcb9685..5eb90b6201b5cb 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_binary.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_binary.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSBinaryBackward; */ class ov::pass::transpose_sinking::TSBinaryForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSBinaryForward", "0"); + OPENVINO_RTTI("ov::pass::TSBinaryForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSBinaryForward(); }; diff --git 
a/src/common/transformations/include/transformations/transpose_sinking/ts_concat.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_concat.hpp index 5b6477da94a80d..b453a2112575ed 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_concat.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_concat.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSConcatBackward; */ class ov::pass::transpose_sinking::TSConcatForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSConcatForward", "0"); + OPENVINO_RTTI("ov::pass::TSConcatForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSConcatForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp index 741c56d5be0de7..51f4251233825f 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_cumsum.hpp @@ -25,7 +25,7 @@ class TRANSFORMATIONS_API TSCumSumBackward; */ class ov::pass::transpose_sinking::TSCumSumForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSBinaryForward", "0"); + OPENVINO_RTTI("ov::pass::TSBinaryForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSCumSumForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_data_movement.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_data_movement.hpp index e1a4f34a109eec..b004615fe9707d 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_data_movement.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_data_movement.hpp @@ -27,7 +27,7 @@ class TRANSFORMATIONS_API TSDataMovementBackward; */ class 
ov::pass::transpose_sinking::TSDataMovementForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSDataMovementForward", "0"); + OPENVINO_RTTI("ov::pass::TSDataMovementForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSDataMovementForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_gather.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_gather.hpp index 891b8bd85c2ed4..c0e1d9569b1160 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_gather.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_gather.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSGatherBackward; */ class ov::pass::transpose_sinking::TSGatherForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSGatherForward", "0"); + OPENVINO_RTTI("ov::pass::TSGatherForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSGatherForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_interpolate.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_interpolate.hpp index 90ae417aca9fc6..98a036fc9760c7 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_interpolate.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_interpolate.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSInterpolateBackward; */ class ov::pass::transpose_sinking::TSInterpolateForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSInterpolateForward", "0"); + OPENVINO_RTTI("ov::pass::TSInterpolateForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSInterpolateForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_reduction.hpp 
b/src/common/transformations/include/transformations/transpose_sinking/ts_reduction.hpp index d2992bc8a4abd8..80a33cb62bd3c9 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_reduction.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_reduction.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSReductionBackward; */ class ov::pass::transpose_sinking::TSReductionForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSReductionForward", "0"); + OPENVINO_RTTI("ov::pass::TSReductionForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSReductionForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_shape_of.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_shape_of.hpp index e1f5a046203f53..adc730e1d4cd49 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_shape_of.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_shape_of.hpp @@ -46,6 +46,6 @@ class TRANSFORMATIONS_API TSShapeOfForward; */ class ov::pass::transpose_sinking::TSShapeOfForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("TSShapeOfForward", "0"); + OPENVINO_RTTI("TSShapeOfForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSShapeOfForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_slice.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_slice.hpp index 12ccc614861140..11d416ea23b4a8 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_slice.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_slice.hpp @@ -21,7 +21,7 @@ class TRANSFORMATIONS_API TSSliceBackward; class ov::pass::transpose_sinking::TSSliceForward : public ov::pass::transpose_sinking::TSForwardBase { public: - 
OPENVINO_RTTI("ov::pass::TSSliceForward", "0"); + OPENVINO_RTTI("ov::pass::TSSliceForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSSliceForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_split.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_split.hpp index b21bada67ad368..54d5bf5ec40aad 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_split.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_split.hpp @@ -27,7 +27,7 @@ class TRANSFORMATIONS_API TSSplitForward; */ class ov::pass::transpose_sinking::TSSplitForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSSplitForward", "0"); + OPENVINO_RTTI("ov::pass::TSSplitForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSSplitForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_squeeze.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_squeeze.hpp index 0d86d0a4c29242..2d5a60a196d5aa 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_squeeze.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_squeeze.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSSqueezeBackward; */ class ov::pass::transpose_sinking::TSSqueezeForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSSqueezeForward", "0"); + OPENVINO_RTTI("ov::pass::TSSqueezeForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSSqueezeForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp index ffd14ce9a38d84..782bab2089564b 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp +++ 
b/src/common/transformations/include/transformations/transpose_sinking/ts_tile.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSTileBackward; */ class ov::pass::transpose_sinking::TSTileForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSBinaryForward", "0"); + OPENVINO_RTTI("ov::pass::TSBinaryForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSTileForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_unary.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_unary.hpp index 1d745ada561224..f9e27494cbf84a 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_unary.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_unary.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSUnaryBackward; */ class ov::pass::transpose_sinking::TSUnaryForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("TSUnaryForward", "0"); + OPENVINO_RTTI("TSUnaryForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSUnaryForward(); }; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_unsqueeze.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_unsqueeze.hpp index 60e5f8f7893961..8b4e9bb2f3b9e9 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_unsqueeze.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_unsqueeze.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API TSUnsqueezeBackward; */ class ov::pass::transpose_sinking::TSUnsqueezeForward : public ov::pass::transpose_sinking::TSForwardBase { public: - OPENVINO_RTTI("ov::pass::TSUnsqueezeForward", "0"); + OPENVINO_RTTI("ov::pass::TSUnsqueezeForward", "0", ov::pass::transpose_sinking::TSForwardBase); TSUnsqueezeForward(); }; diff --git 
a/src/common/transformations/src/transformations/common_optimizations/nop_elimination.cpp b/src/common/transformations/src/transformations/common_optimizations/nop_elimination.cpp index 3883e94b74d33c..b5977aa93bc97b 100644 --- a/src/common/transformations/src/transformations/common_optimizations/nop_elimination.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/nop_elimination.cpp @@ -350,7 +350,7 @@ static bool eliminate_unsqueeze(const shared_ptr& node) { #define SIMPLE_MATCHER_PASS_DEFINITION(NAME, FUNC, ...) \ class NAME : public ov::pass::MatcherPass { \ public: \ - OPENVINO_RTTI(STR(NAME), "0"); \ + OPENVINO_RTTI(STR(NAME), "0", ov::pass::MatcherPass); \ NAME() { \ MATCHER_SCOPE(NAME); \ auto match_node = ov::pass::pattern::wrap_type<__VA_ARGS__>(); \ diff --git a/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp b/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp index fc667bd23a97b4..addc8b111e82a7 100644 --- a/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp +++ b/src/common/transformations/src/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.cpp @@ -271,7 +271,7 @@ class MarkExp : public pass::MatcherPass { */ class MarkExpInReduceOpPath : public BackwardGraphRewrite { public: - OPENVINO_RTTI("MarkExpInReduceOpPath", "0"); + OPENVINO_RTTI("MarkExpInReduceOpPath", "0", BackwardGraphRewrite); MarkExpInReduceOpPath() { // marking of ReduceOp path is needed to mark only Exponents that go into ReduceSum/ReduceMean ADD_MATCHER_FOR_THIS(InitMarkReduceOpPath); diff --git a/src/core/dev_api/openvino/core/rt_info/weightless_caching_attributes.hpp b/src/core/dev_api/openvino/core/rt_info/weightless_caching_attributes.hpp index e3cf2609b26c8d..49f67f91e5cbde 100644 --- 
a/src/core/dev_api/openvino/core/rt_info/weightless_caching_attributes.hpp +++ b/src/core/dev_api/openvino/core/rt_info/weightless_caching_attributes.hpp @@ -22,7 +22,7 @@ namespace ov { */ class OPENVINO_API WeightlessCacheAttribute : public RuntimeAttribute { public: - OPENVINO_RTTI("WeightlessCacheAttribute"); + OPENVINO_RTTI("WeightlessCacheAttribute", "0", RuntimeAttribute); WeightlessCacheAttribute() = delete; diff --git a/src/core/include/openvino/core/layout.hpp b/src/core/include/openvino/core/layout.hpp index 71d00855c64b94..c9e1c6f2c3d466 100644 --- a/src/core/include/openvino/core/layout.hpp +++ b/src/core/include/openvino/core/layout.hpp @@ -205,7 +205,7 @@ class OPENVINO_API AttributeAdapter : public ValueAccessor class OPENVINO_API LayoutAttribute : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("layout", "0"); + OPENVINO_RTTI("layout", "0", RuntimeAttribute); LayoutAttribute() = default; diff --git a/src/core/include/openvino/core/preprocess/input_tensor_info.hpp b/src/core/include/openvino/core/preprocess/input_tensor_info.hpp index f1b9ce4e1e75b5..e38760aa1a123c 100644 --- a/src/core/include/openvino/core/preprocess/input_tensor_info.hpp +++ b/src/core/include/openvino/core/preprocess/input_tensor_info.hpp @@ -16,7 +16,7 @@ namespace preprocess { class OPENVINO_API TensorInfoMemoryType : public RuntimeAttribute { public: - OPENVINO_RTTI("memory_type", "0"); + OPENVINO_RTTI("memory_type", "0", RuntimeAttribute); TensorInfoMemoryType() = default; diff --git a/src/core/include/openvino/op/util/precision_sensitive_attribute.hpp b/src/core/include/openvino/op/util/precision_sensitive_attribute.hpp index d17167308a8286..926267e296e072 100644 --- a/src/core/include/openvino/op/util/precision_sensitive_attribute.hpp +++ b/src/core/include/openvino/op/util/precision_sensitive_attribute.hpp @@ -23,7 +23,7 @@ bool OPENVINO_API is_precision_sensitive(const ov::Input& node_input); */ class OPENVINO_API PrecisionSensitive : public RuntimeAttribute { 
public: - OPENVINO_RTTI("precision_sensitive", "0"); + OPENVINO_RTTI("precision_sensitive", "0", RuntimeAttribute); PrecisionSensitive() = default; diff --git a/src/core/include/openvino/op/util/symbolic_info.hpp b/src/core/include/openvino/op/util/symbolic_info.hpp index 0b2f5d089689b0..5979a562ac1697 100644 --- a/src/core/include/openvino/op/util/symbolic_info.hpp +++ b/src/core/include/openvino/op/util/symbolic_info.hpp @@ -27,7 +27,7 @@ OPENVINO_API void populate_tensor_with_missing_symbols(ov::descriptor::Tensor& t */ class OPENVINO_API SkipInvalidation : public RuntimeAttribute { public: - OPENVINO_RTTI("SkipInvalidation", "0"); + OPENVINO_RTTI("SkipInvalidation", "0", RuntimeAttribute); SkipInvalidation() = default; bool is_copyable() const override { return false; diff --git a/src/core/include/openvino/pass/constant_folding.hpp b/src/core/include/openvino/pass/constant_folding.hpp index 6b2e763e49cb1a..f67d8e2fc71a11 100644 --- a/src/core/include/openvino/pass/constant_folding.hpp +++ b/src/core/include/openvino/pass/constant_folding.hpp @@ -65,7 +65,7 @@ OPENVINO_API bool constant_folding_is_disabled(const Node* const node); class OPENVINO_API DisableConstantFolding : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("DisableConstantFolding"); + OPENVINO_RTTI("DisableConstantFolding", "0", ov::RuntimeAttribute); DisableConstantFolding() = default; bool is_copyable() const override { return false; diff --git a/src/core/src/pass/constant_folding.cpp b/src/core/src/pass/constant_folding.cpp index cc1a7cea5b5add..96a60084980e7e 100644 --- a/src/core/src/pass/constant_folding.cpp +++ b/src/core/src/pass/constant_folding.cpp @@ -75,7 +75,7 @@ static bool restore_original_input_precision(const std::shared_ptr& no class RequiresPrecisionConversion : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("requires_precision_conversion", "0"); + OPENVINO_RTTI("requires_precision_conversion", "0", RuntimeAttribute); bool is_copyable() const override { return 
false; diff --git a/src/core/tests/copy_runtime_info.cpp b/src/core/tests/copy_runtime_info.cpp index c32a4b87c66cac..3e0af8f2b8644d 100644 --- a/src/core/tests/copy_runtime_info.cpp +++ b/src/core/tests/copy_runtime_info.cpp @@ -21,7 +21,7 @@ using namespace std; class TestAttributeNoCopyable : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("TestAttributeNoCopyable"); + OPENVINO_RTTI("TestAttributeNoCopyable", "0", RuntimeAttribute); TestAttributeNoCopyable() = default; bool is_copyable() const override { return false; @@ -40,7 +40,7 @@ class TestAttributeNoCopyable : public ov::RuntimeAttribute { class TestAttributeCopyable : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("TestAttributeCopyable"); + OPENVINO_RTTI("TestAttributeCopyable", "0", RuntimeAttribute); TestAttributeCopyable() = default; static void set(std::shared_ptr node) { @@ -56,7 +56,7 @@ class TestAttributeCopyable : public ov::RuntimeAttribute { class TestAttributeMergable : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("TestAttributeMergable"); + OPENVINO_RTTI("TestAttributeMergable", "0", RuntimeAttribute); TestAttributeMergable() = default; static void set(std::shared_ptr node) { diff --git a/src/frontends/common/include/openvino/frontend/graph_iterator.hpp b/src/frontends/common/include/openvino/frontend/graph_iterator.hpp index 9306d909cc109b..fd3fb18f508c07 100644 --- a/src/frontends/common/include/openvino/frontend/graph_iterator.hpp +++ b/src/frontends/common/include/openvino/frontend/graph_iterator.hpp @@ -15,7 +15,7 @@ namespace tensorflow { /// Abstract representation for an input model graph that gives nodes in topologically sorted order class FRONTEND_API GraphIterator : ::ov::RuntimeAttribute { public: - OPENVINO_RTTI("Variant::GraphIterator"); + OPENVINO_RTTI("Variant::GraphIterator", "0", RuntimeAttribute); using Ptr = std::shared_ptr; diff --git a/src/frontends/tensorflow/src/tf_utils.hpp b/src/frontends/tensorflow/src/tf_utils.hpp index 
c585c9ae9019af..96bb0168237447 100644 --- a/src/frontends/tensorflow/src/tf_utils.hpp +++ b/src/frontends/tensorflow/src/tf_utils.hpp @@ -39,7 +39,7 @@ using SetOfBranchIndices = std::unordered_set; // structure to save conditional flow marker class CfMarkerType : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("CfMarkerType"); + OPENVINO_RTTI("CfMarkerType", "0", RuntimeAttribute); CfMarkerType() = default; bool is_copyable() const override; diff --git a/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/quantization_info.hpp b/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/quantization_info.hpp index 66977db1caa5d4..ebf42094ed6d00 100644 --- a/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/quantization_info.hpp +++ b/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/quantization_info.hpp @@ -17,7 +17,7 @@ namespace tensorflow_lite { class TENSORFLOW_LITE_API QuantizationInfo : public ov::RuntimeAttribute { public: - OPENVINO_RTTI("QuantizationInfo"); + OPENVINO_RTTI("QuantizationInfo", "0", RuntimeAttribute); QuantizationInfo() = default; explicit QuantizationInfo(const std::vector& scale, const std::vector& zero_point, diff --git a/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/sparsity_info.hpp b/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/sparsity_info.hpp index c1ab8d4fd04941..515d96a6faaf07 100644 --- a/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/sparsity_info.hpp +++ b/src/frontends/tensorflow_lite/include/openvino/frontend/tensorflow_lite/sparsity_info.hpp @@ -18,6 +18,8 @@ namespace tensorflow_lite { class TENSORFLOW_LITE_API SparsityInfo : public ov::RuntimeAttribute { public: + OPENVINO_RTTI("SparsityInfo", "0", RuntimeAttribute); + struct SparsityDataDesc { uint8_t segments_type; const void* segments; @@ -25,7 +27,6 @@ class TENSORFLOW_LITE_API SparsityInfo : public 
ov::RuntimeAttribute { const void* indices; }; - OPENVINO_RTTI("SparsityInfo"); SparsityInfo() = default; explicit SparsityInfo(const std::vector& shape, const std::vector& traversal_order, diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp index 7898833529294b..2ebe679318afee 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp @@ -61,13 +61,13 @@ class ConvertConv1DBase : public ov::pass::MatcherPass { class ConvertConv1D : public ConvertConv1DBase { public: - OPENVINO_RTTI("ConvertConv1D", "0"); + OPENVINO_RTTI("ConvertConv1D", "0", ConvertConv1DBase); ConvertConv1D(); }; class ConvertGroupConv1D : public ConvertConv1DBase { public: - OPENVINO_RTTI("ConvertGroupConv1D", "0"); + OPENVINO_RTTI("ConvertGroupConv1D", "0", ConvertConv1DBase); ConvertGroupConv1D(); }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp index 4b0bb0e8c81f8a..1b8ce66eab5a22 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp @@ -58,25 +58,25 @@ class ConvertReduceMultiAxisBase : public ov::pass::MatcherPass { class ConvertReduceProd : public ConvertReduceMultiAxisBase { public: - OPENVINO_RTTI("ConvertReduceProd", "0"); + OPENVINO_RTTI("ConvertReduceProd", "0", ConvertReduceMultiAxisBase); ConvertReduceProd(); }; class ConvertReduceMin : public ConvertReduceMultiAxisBase { public: - OPENVINO_RTTI("ConvertReduceMin", "0"); + OPENVINO_RTTI("ConvertReduceMin", "0", ConvertReduceMultiAxisBase); 
ConvertReduceMin(); }; class ConvertReduceMax : public ConvertReduceMultiAxisBase { public: - OPENVINO_RTTI("ConvertReduceMax", "0"); + OPENVINO_RTTI("ConvertReduceMax", "0", ConvertReduceMultiAxisBase); ConvertReduceMax(); }; class ConvertReduceSum : public ConvertReduceMultiAxisBase { public: - OPENVINO_RTTI("ConvertReduceSum", "0"); + OPENVINO_RTTI("ConvertReduceSum", "0", ConvertReduceMultiAxisBase); ConvertReduceSum(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp index 9684a047afa08e..8ebde84405695b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp @@ -53,7 +53,7 @@ class ConvertReduceNoKeepDimsBase : public ov::pass::MatcherPass { template class ConvertReduction : public ConvertReduceNoKeepDimsBase { public: - OPENVINO_RTTI("ConvertReduction", "0"); + OPENVINO_RTTI("ConvertReduction", "0", ConvertReduceNoKeepDimsBase); ConvertReduction(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_readvalue_inputs_to_subgraph.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_readvalue_inputs_to_subgraph.hpp index 220003cc83ead1..ea4f085f8ed59a 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_readvalue_inputs_to_subgraph.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_readvalue_inputs_to_subgraph.hpp @@ -23,7 +23,7 @@ namespace intel_cpu { class MoveReadValueInputsToSubgraph : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MoveReadValueInputsToSubgraph", "0"); + OPENVINO_MATCHER_PASS_RTTI("MoveReadValueInputsToSubgraph"); MoveReadValueInputsToSubgraph(); }; diff --git 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp index 59494736bb2c2e..34ed4bee95622c 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp @@ -16,7 +16,7 @@ class StatefulSDPAFusion : public ov::pass::MatcherPass { class SDPASubgraphFusion : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SDPASubgraphFusion", "0"); + OPENVINO_MODEL_PASS_RTTI("SDPASubgraphFusion"); bool run_on_model(const std::shared_ptr& f) override; }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp index d84c11af9801e5..123e8b51a3d965 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp @@ -30,25 +30,25 @@ class MHAFusionBase : public ov::pass::MatcherPass { class MHAFloatFusion : public MHAFusionBase { public: - OPENVINO_RTTI("MHAFloatFusion", "0"); + OPENVINO_RTTI("MHAFloatFusion", "0", MHAFusionBase); MHAFloatFusion(); }; class MHAFloatFusion2 : public MHAFusionBase { public: - OPENVINO_RTTI("MHAFloatFusion2", "0"); + OPENVINO_RTTI("MHAFloatFusion2", "0", MHAFusionBase); MHAFloatFusion2(); }; class MHAQuantFusion : public MHAFusionBase { public: - OPENVINO_RTTI("MHAQuantFusion", "0"); + OPENVINO_RTTI("MHAQuantFusion", "0", MHAFusionBase); MHAQuantFusion(); }; class MHAQuantFusion2 : public MHAFusionBase { public: - OPENVINO_RTTI("MHAQuantFusion2", "0"); + OPENVINO_RTTI("MHAQuantFusion2", "0", MHAFusionBase); MHAQuantFusion2(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp 
b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp index 9ed4162a433411..e765bff89955bd 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp @@ -20,8 +20,8 @@ namespace pass { */ class AdjustBrgemmCopyBLoopPorts : public snippets::lowered::pass::ConstPass { public: + OPENVINO_RTTI("AdjustBrgemmCopyBLoopPorts", "0", snippets::lowered::pass::ConstPass); AdjustBrgemmCopyBLoopPorts() = default; - OPENVINO_RTTI("AdjustBrgemmCopyBLoopPorts", "ConstPass"); bool run(const snippets::lowered::LinearIR& linear_ir) override; static bool update_loop_info(const snippets::lowered::UnifiedLoopInfoPtr& uni_loop_info); const std::unordered_set& get_affected_loops() { diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp index e354dbe0c5a7fa..595ac3f37aa337 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp @@ -18,7 +18,7 @@ namespace pass { */ class BrgemmCPUBlocking : public ov::snippets::lowered::pass::BrgemmBlocking { public: - OPENVINO_RTTI("BrgemmCPUBlocking", "BrgemmBlocking") + OPENVINO_RTTI("BrgemmCPUBlocking", "BrgemmCPU", ov::snippets::lowered::pass::BrgemmBlocking); /** * @interface DummyPass @@ -29,7 +29,7 @@ class BrgemmCPUBlocking : public ov::snippets::lowered::pass::BrgemmBlocking { public: - OPENVINO_RTTI("BrgemmTPPBlocking", "BrgemmBlockingBase") + OPENVINO_RTTI("BrgemmTPPBlocking", + "tpp::op::BrgemmTPP", + snippets::lowered::pass::BrgemmBlocking); /** * @interface SetBrgemmBeta @@ -29,8 +31,8 @@ class BrgemmTPPBlocking : public 
ov::snippets::lowered::pass::BrgemmBlocking