Merge remote-tracking branch 'upstream/master' into tj/rtti/ov-dynamic-cast-on-android-only
t-jankowski committed Jan 20, 2025
2 parents d81a04a + 3e8bc27 commit 25f884d
Showing 16 changed files with 125 additions and 148 deletions.
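
Taken together, the changed files all make the same refactor: each node source previously defined its own local THROW_ERROR macro on top of OPENVINO_THROW (embedding the node's type and name into the message prefix), and those per-file definitions are deleted while the call sites switch to the shared THROW_CPU_NODE_ERR macro. A few call sites also drop a stray leading space from the message (e.g. " has undefined input data memory." becomes "has undefined input data memory."), since the shared macro already supplies the separator. The stand-alone C++ sketch below only illustrates the pattern; the Node struct and the THROW_CPU_NODE_ERR_SKETCH macro are simplified stand-ins for illustration, not the actual OpenVINO definitions (the real macro lives in the common CPU node headers and is not part of this diff).

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Simplified stand-in for an intel_cpu node: only the two accessors the
    // error macro needs are modeled here.
    struct Node {
        std::string typeStr;
        std::string name;
        const std::string& getTypeStr() const { return typeStr; }
        const std::string& getName() const { return name; }
    };

    // Sketch of a shared error macro: one definition that prefixes every message
    // with the node type and name, instead of a copy-pasted THROW_ERROR per file.
    #define THROW_CPU_NODE_ERR_SKETCH(node, msg)                              \
        throw std::runtime_error((node).getTypeStr() + " node with name '" +  \
                                 (node).getName() + "' " + (msg))

    int main() {
        Node gather{"Gather", "gather_1"};
        try {
            // Mirrors a call site from the diff, e.g. in Gather::Gather().
            THROW_CPU_NODE_ERR_SKETCH(gather, "has incorrect number of input/output edges!");
        } catch (const std::exception& e) {
            std::cout << e.what() << '\n';
            // Prints: Gather node with name 'gather_1' has incorrect number of input/output edges!
        }
        return 0;
    }

Centralizing the macro this way keeps the error prefix format consistent across all CPU nodes and removes the duplicated (and slightly divergent) per-file definitions that this commit deletes.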
24 changes: 11 additions & 13 deletions src/plugins/intel_cpu/src/nodes/depth_to_space.cpp
@@ -14,8 +14,6 @@
#include "openvino/opsets/opset1.hpp"
#include "utils/general_utils.h"

#define THROW_ERROR(...) OPENVINO_THROW("DepthToSpace layer with name '", getName(), "' ", __VA_ARGS__)

using namespace dnnl::impl;

namespace ov {
@@ -73,34 +71,34 @@ DepthToSpace::DepthToSpace(const std::shared_ptr<ov::Node>& op, const GraphConte
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
if (inputShapes.size() != 1 || outputShapes.size() != 1)
THROW_ERROR("has incorrect number of input/output edges!");
THROW_CPU_NODE_ERR("has incorrect number of input/output edges!");

auto depthToSpace = ov::as_type_ptr<const ov::opset1::DepthToSpace>(op);
if (!depthToSpace)
THROW_ERROR("supports only opset1");
THROW_CPU_NODE_ERR("supports only opset1");

const auto modeNgraph = depthToSpace->get_mode();
if (modeNgraph == ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST) {
attrs.mode = Mode::BLOCKS_FIRST;
} else if (modeNgraph == ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST) {
attrs.mode = Mode::DEPTH_FIRST;
} else {
THROW_ERROR("doesn't support mode: ", ov::as_string(modeNgraph));
THROW_CPU_NODE_ERR("doesn't support mode: ", ov::as_string(modeNgraph));
}

attrs.blockSize = depthToSpace->get_block_size();
if (attrs.blockSize == 0)
THROW_ERROR("has incorrect block_size parameter is zero!");
THROW_CPU_NODE_ERR("has incorrect block_size parameter is zero!");

const size_t srcRank = getInputShapeAtPort(0).getRank();
const size_t dstRank = getOutputShapeAtPort(0).getRank();

if (srcRank < 3)
THROW_ERROR("has incorrect number of input dimensions");
THROW_CPU_NODE_ERR("has incorrect number of input dimensions");
if (srcRank > 5)
THROW_ERROR("doesn't support dimensions with rank greater than 5");
THROW_CPU_NODE_ERR("doesn't support dimensions with rank greater than 5");
if (srcRank != dstRank)
THROW_ERROR("has incorrect number of input/output dimensions");
THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions");

const size_t nSpatialDims = srcRank - 2;
attrs.blockStep = static_cast<size_t>(std::pow(attrs.blockSize, nSpatialDims));
@@ -164,11 +162,11 @@ void DepthToSpace::createPrimitive() {
auto dstMemPtr = getDstMemoryAtPort(0);
auto srcMemPtr = getSrcMemoryAtPort(0);
if (!dstMemPtr)
THROW_ERROR("has null destination memory");
THROW_CPU_NODE_ERR("has null destination memory");
if (!srcMemPtr)
THROW_ERROR("has null input memory");
THROW_CPU_NODE_ERR("has null input memory");
if (getSelectedPrimitiveDescriptor() == nullptr)
THROW_ERROR("has unidentified preferable primitive descriptor");
THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor");

const auto& memoryDesc = srcMemPtr->getDesc();
attrs.dataSize = memoryDesc.getPrecision().size();
Expand Down Expand Up @@ -305,7 +303,7 @@ void DepthToSpace::DepthToSpaceExecutor::exec(const MemoryPtr& srcMemPtr, const

void DepthToSpace::execute(const dnnl::stream& strm) {
if (!execPtr) {
THROW_ERROR("doesn't have a compiled executor.");
THROW_CPU_NODE_ERR("doesn't have a compiled executor.");
}

int MB = getSrcMemoryAtPort(0)->getStaticDims()[0];
2 changes: 0 additions & 2 deletions src/plugins/intel_cpu/src/nodes/eye.cpp
@@ -12,8 +12,6 @@
#include "shape_inference/shape_inference.hpp"
#include "utils/bfloat16.hpp"

#define THROW_ERROR(...) OPENVINO_THROW(NameFromType(getType()), " node with name '", getName(), "' ", __VA_ARGS__)

namespace ov {
namespace intel_cpu {
namespace node {
20 changes: 9 additions & 11 deletions src/plugins/intel_cpu/src/nodes/gather.cpp
@@ -24,8 +24,6 @@

using namespace dnnl::impl::cpu;

#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)

namespace ov {
namespace intel_cpu {
namespace node {
@@ -69,7 +67,7 @@ Gather::Gather(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& co
if (one_of(op->get_input_size(), 4u, 5u) && op->get_output_size() == 1u) {
compressed = true;
} else if (op->get_input_size() != 3 || op->get_output_size() != 1) {
THROW_ERROR("has incorrect number of input/output edges!");
THROW_CPU_NODE_ERR("has incorrect number of input/output edges!");
}

const auto& dataShape = getInputShapeAtPort(GATHER_DATA);
@@ -80,7 +78,7 @@
isIdxShapeStat = idxShape.isStatic();
const auto indicesRank = idxShape.getRank();
if (dataSrcRank == 0lu || indicesRank == 0lu)
THROW_ERROR("has incorrect input parameters ranks.");
THROW_CPU_NODE_ERR("has incorrect input parameters ranks.");

if (ov::is_type<ov::op::v8::Gather>(op)) {
batchDims = static_cast<int>(ov::as_type_ptr<ov::op::v8::Gather>(op)->get_batch_dims());
@@ -104,15 +102,15 @@ Gather::Gather(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& co
if (batchDims < 0)
batchDims += indicesRank;
if (batchDims < 0 || batchDims > std::min(static_cast<int>(dataSrcRank), static_cast<int>(indicesRank)))
THROW_ERROR("has incorrect batch_dims ", batchDims, "!");
THROW_CPU_NODE_ERR("has incorrect batch_dims ", batchDims, "!");

if (ov::is_type<ov::op::v0::Constant>(op->get_input_node_ptr(GATHER_AXIS))) {
isAxisInputConst = true;
axis = ov::as_type<ov::op::v0::Constant>(op->get_input_node_ptr(GATHER_AXIS))->cast_vector<int>()[0];
if (axis < 0)
axis += dataSrcRank;
if (axis < 0 || axis >= dataSrcRank || batchDims > axis)
THROW_ERROR("has incorrect input parameter axis value: ", axis);
THROW_CPU_NODE_ERR("has incorrect input parameter axis value: ", axis);
}

if (auto indices = ov::as_type<ov::op::v0::Constant>(op->get_input_node_ptr(GATHER_INDICES))) {
@@ -339,12 +337,12 @@ bool Gather::needPrepareParams() const {
void Gather::prepareParams() {
auto dataMemPtr = getSrcMemoryAtPort(GATHER_DATA);
if (!dataMemPtr || !dataMemPtr->isDefined())
THROW_ERROR(" has undefined input data memory.");
THROW_CPU_NODE_ERR("has undefined input data memory.");
auto idxMemPtr = getSrcMemoryAtPort(GATHER_INDICES);
if (!idxMemPtr || !idxMemPtr->isDefined())
THROW_ERROR(" has undefined input indices memory.");
THROW_CPU_NODE_ERR("has undefined input indices memory.");
if (getSelectedPrimitiveDescriptor() == nullptr)
THROW_ERROR(" has unidentified preferable primitive descriptor.");
THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor.");

// short 1D vector fast execution impl (typical in shape infer subgraph)
canOptimize1DCase = false;
@@ -363,7 +361,7 @@ void Gather::prepareParams() {
if (axis < 0)
axis += dataSrcRank;
if (axis < 0 || axis >= dataSrcRank || batchDims > axis)
THROW_ERROR("has incorrect input parameter axis value: ", axis);
THROW_CPU_NODE_ERR("has incorrect input parameter axis value: ", axis);
}

if (!isDataShapeStat || !isAxisInputConst) {
@@ -553,7 +551,7 @@ void Gather::executeDynamicImpl(const dnnl::stream& strm) {

void Gather::initShortParams(threadExecParams& p, const uint64_t start) {
if (!jitKernel)
THROW_ERROR("has uninitialized kernel in function initShortParams.");
THROW_CPU_NODE_ERR("has uninitialized kernel in function initShortParams.");
const uint64_t idxElPerVec = jitKernel->getIdxElPerVec();

if (afterAxisSize == 1) { // Elementwise gather.
10 changes: 5 additions & 5 deletions src/plugins/intel_cpu/src/nodes/gather_elements.cpp
@@ -38,19 +38,19 @@ GatherElements::GatherElements(const std::shared_ptr<ov::Node>& op, const GraphC
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
if (inputShapes.size() != 2 || outputShapes.size() != 1)
THROW_CPU_NODE_ERR(" has invalid number of input/output edges.");
THROW_CPU_NODE_ERR("has invalid number of input/output edges.");

const auto dataRank = getInputShapeAtPort(dataIndex_).getRank();
const auto indicesRank = getInputShapeAtPort(indicesIndex_).getRank();
if (dataRank != indicesRank)
THROW_CPU_NODE_ERR(" has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks.");
THROW_CPU_NODE_ERR("has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks.");

auto gatherElementsOp = ov::as_type_ptr<ov::op::v6::GatherElements>(op);
auto axis = gatherElementsOp->get_axis();
if (axis < 0)
axis += dataRank;
if (axis < 0 || axis >= static_cast<int>(dataRank))
THROW_CPU_NODE_ERR(" has invalid axis attribute: ", axis);
THROW_CPU_NODE_ERR("has invalid axis attribute: ", axis);
axis_ = axis;
}

@@ -78,12 +78,12 @@ void GatherElements::initSupportedPrimitiveDescriptors() {
sizeof(element_type_traits<ov::element::i32>::value_type),
sizeof(element_type_traits<ov::element::i16>::value_type),
sizeof(element_type_traits<ov::element::i8>::value_type))) {
THROW_CPU_NODE_ERR(" has unsupported 'inputData' input precision: ", inDataPrecision);
THROW_CPU_NODE_ERR("has unsupported 'inputData' input precision: ", inDataPrecision);
}

ov::element::Type indicesPrecision = getOriginalInputPrecisionAtPort(indicesIndex_);
if (!one_of(indicesPrecision, ov::element::i32, ov::element::i64)) {
THROW_CPU_NODE_ERR(" has unsupported 'indices' input precision: ", indicesPrecision);
THROW_CPU_NODE_ERR("has unsupported 'indices' input precision: ", indicesPrecision);
}

dataTypeSize_ = inDataPrecision.size();
22 changes: 10 additions & 12 deletions src/plugins/intel_cpu/src/nodes/gather_nd.cpp
@@ -14,8 +14,6 @@
#include "openvino/core/parallel.hpp"
#include "utils/general_utils.h"

#define THROW_ERROR(...) OPENVINO_THROW("GatherND layer with name '", getName(), "' ", __VA_ARGS__)

namespace ov {
namespace intel_cpu {
namespace node {
@@ -43,7 +41,7 @@ GatherND::GatherND(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr
}

if (inputShapes.size() != 2 && outputShapes.size() != 1)
THROW_ERROR("has invalid number of input/output edges.");
THROW_CPU_NODE_ERR("has invalid number of input/output edges.");

const size_t dataInputRank = getInputShapeAtPort(GATHERND_DATA).getRank();
const size_t indicesInputRank = getInputShapeAtPort(GATHERND_INDEXES).getRank();
@@ -53,10 +51,10 @@
} else if (auto gatherNdOp = ov::as_type_ptr<const ov::op::v5::GatherND>(op)) {
attrs.batchDims = gatherNdOp->get_batch_dims();
} else {
THROW_ERROR("has support only opset5.");
THROW_CPU_NODE_ERR("has support only opset5.");
}
if (attrs.batchDims >= std::min(dataInputRank, indicesInputRank))
THROW_ERROR("has invalid batch_dims attribute: ", attrs.batchDims);
THROW_CPU_NODE_ERR("has invalid batch_dims attribute: ", attrs.batchDims);
}

void GatherND::initSupportedPrimitiveDescriptors() {
@@ -68,7 +66,7 @@ void GatherND::initSupportedPrimitiveDescriptors() {
sizeof(element_type_traits<ov::element::i32>::value_type),
sizeof(element_type_traits<ov::element::i16>::value_type),
sizeof(element_type_traits<ov::element::i8>::value_type))) {
THROW_ERROR("has unsupported 'data' input precision: ", inDataPrecision);
THROW_CPU_NODE_ERR("has unsupported 'data' input precision: ", inDataPrecision);
}
attrs.dataSize = inDataPrecision.size();

@@ -80,7 +78,7 @@
ov::element::u16,
ov::element::i8,
ov::element::u8)) {
THROW_ERROR("has unsupported 'indices' input precision: ", indicesPrecision);
THROW_CPU_NODE_ERR("has unsupported 'indices' input precision: ", indicesPrecision);
}

addSupportedPrimDesc({{LayoutType::ncsp, inDataPrecision}, {LayoutType::ncsp, ov::element::i32}},
@@ -93,13 +91,13 @@ void GatherND::prepareParams() {
auto idxMemPtr = getSrcMemoryAtPort(GATHERND_INDEXES);
auto dstMemPtr = getDstMemoryAtPort(0);
if (!srcMemPtr || !srcMemPtr->isDefined())
THROW_ERROR(" has undefined input memory of 'data'.");
THROW_CPU_NODE_ERR("has undefined input memory of 'data'.");
if (!idxMemPtr || !idxMemPtr->isDefined())
THROW_ERROR(" has undefined input memory of 'indices'.");
THROW_CPU_NODE_ERR("has undefined input memory of 'indices'.");
if (!dstMemPtr || !dstMemPtr->isDefined())
THROW_ERROR(" has undefined output memory.");
THROW_CPU_NODE_ERR("has undefined output memory.");
if (getSelectedPrimitiveDescriptor() == nullptr)
THROW_ERROR(" has unidentified preferable primitive descriptor.");
THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor.");

attrs.srcDims = srcMemPtr->getStaticDims();
attrs.srcStrides = srcMemPtr->getDescWithType<BlockedMemoryDesc>()->getStrides();
@@ -141,7 +139,7 @@ GatherND::GatherNDExecutor::GatherNDExecutor(const GatherNDAttributes& attrs)

void GatherND::execute(const dnnl::stream& strm) {
if (!execPtr)
THROW_ERROR("has not compiled executor.");
THROW_CPU_NODE_ERR("has not compiled executor.");

execPtr->exec(getSrcMemoryAtPort(GATHERND_DATA), getSrcMemoryAtPort(GATHERND_INDEXES), getDstMemoryAtPort(0));
}
2 changes: 0 additions & 2 deletions src/plugins/intel_cpu/src/nodes/grid_sample.cpp
@@ -14,8 +14,6 @@ using namespace ov::intel_cpu::node;
using namespace dnnl::impl::cpu;
#endif // OPENVINO_ARCH_X86_64

#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)

bool GridSample::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
try {
if (!ov::is_type<op::v9::GridSample>(op)) {
4 changes: 1 addition & 3 deletions src/plugins/intel_cpu/src/nodes/interaction.cpp
@@ -28,8 +28,6 @@ namespace ov {
namespace intel_cpu {
namespace node {

#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)

#if defined(OPENVINO_ARCH_X86_64)

template <cpu_isa_t isa>
@@ -346,7 +344,7 @@ void Interaction::prepareParams() {
moveFeatureKernel->create_ker();
moveInteractKernel->create_ker();
} else {
THROW_ERROR("cannot create jit eltwise kernel");
THROW_CPU_NODE_ERR("cannot create jit eltwise kernel");
}
#ifdef CPU_DEBUG_CAPS
if (prim) {
20 changes: 9 additions & 11 deletions src/plugins/intel_cpu/src/nodes/mha.cpp
@@ -25,8 +25,6 @@ using namespace dnnl::impl::cpu::x64;
using namespace dnnl::impl::cpu::x64::matmul;
using namespace Xbyak;

#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)

namespace ov {
namespace intel_cpu {
namespace node {
@@ -879,7 +877,7 @@ void MHA::init_brgemm(brgemmCtx& ctx, std::unique_ptr<brgemm_kernel_t>& brgKerne
ctx.K,
&strides);
if (status != dnnl_success) {
THROW_ERROR("cannot be executed due to invalid brgconv params");
THROW_CPU_NODE_ERR("cannot be executed due to invalid brgconv params");
}

ctx.is_with_amx = use_amx;
@@ -893,11 +891,11 @@
brgemm_kernel_t* brgKernel_ = nullptr;
status = brgemm_kernel_create(&brgKernel_, brgDesc);
if (status != dnnl_success) {
THROW_ERROR("cannot be executed due to invalid brgconv params");
THROW_CPU_NODE_ERR("cannot be executed due to invalid brgconv params");
}
brgKernel.reset(brgKernel_);
#else
THROW_ERROR("is not supported on non-x86_64");
THROW_CPU_NODE_ERR("is not supported on non-x86_64");
#endif // OPENVINO_ARCH_X86_64
}

@@ -972,7 +970,7 @@ void MHA::init_brgemm_copy_b(std::unique_ptr<jit_brgemm_matmul_copy_b_t>& brgCop
#if defined(OPENVINO_ARCH_X86_64)
auto ret = create_brgemm_matmul_copy_b(brgCopyKernel, &brgCopyKernelConf);
if (ret != dnnl::impl::status_t::dnnl_success)
THROW_ERROR("cannot create_brgemm_matmul_copy_b kernel, dnnl_status: ", ret);
THROW_CPU_NODE_ERR("cannot create_brgemm_matmul_copy_b kernel, dnnl_status: ", ret);
#endif // OPENVINO_ARCH_X86_64
}

@@ -1204,7 +1202,7 @@ void MHA::prepareParams() {
}
#endif // OPENVINO_ARCH_X86_64
if (!mulAddSoftmaxKernel) {
THROW_ERROR("cannot create jit eltwise kernel");
THROW_CPU_NODE_ERR("cannot create jit eltwise kernel");
}
}

@@ -1228,7 +1226,7 @@
}
#endif // OPENVINO_ARCH_X86_64
if (!convertReorderKernel) {
THROW_ERROR("cannot create jit eltwise kernel");
THROW_CPU_NODE_ERR("cannot create jit eltwise kernel");
}
}

@@ -1255,7 +1253,7 @@
#endif // OPENVINO_ARCH_X86_64

if (!convertTransposeKernel) {
THROW_ERROR("cannot create jit eltwise kernel");
THROW_CPU_NODE_ERR("cannot create jit eltwise kernel");
}
}

@@ -1312,7 +1310,7 @@ void MHA::callBrgemm(brgemmCtx& ctx,
brgemm_kernel_execute(brgKernel.get(), 1, pin0, pin1, nullptr, pout, wsp);
}
#else
THROW_ERROR("is not supported on non-x64 platforms");
THROW_CPU_NODE_ERR("is not supported on non-x64 platforms");
#endif // OPENVINO_ARCH_X86_64
}

@@ -1547,7 +1545,7 @@ void MHA::execute(const dnnl::stream& strm) {
} else if (inputPrecisions[1] == ov::element::i8) {
mhaImpl<int8_t>();
} else {
THROW_ERROR("doesn't support provided input precisions");
THROW_CPU_NODE_ERR("doesn't support provided input precisions");
}
}

