From dfeebbdc5aecbeb666e497a431517d11bb51f7f2 Mon Sep 17 00:00:00 2001
From: Mateusz
Date: Sat, 10 Feb 2024 19:42:11 +0100
Subject: [PATCH 01/25] Replace PT FE AlignTypes with opset14 ConvertPromoteTypes

---
 src/frontends/pytorch/src/frontend.cpp        |  8 +-
 .../pytorch/src/helper_ops/align_types.hpp    | 43 ---------
 src/frontends/pytorch/src/op/add.cpp          |  4 +-
 src/frontends/pytorch/src/op/bitwise.cpp      |  6 +-
 src/frontends/pytorch/src/op/cross.cpp        |  6 +-
 src/frontends/pytorch/src/op/distance.cpp     |  4 +-
 src/frontends/pytorch/src/op/div.cpp          |  2 +-
 src/frontends/pytorch/src/op/floor_divide.cpp |  4 +-
 src/frontends/pytorch/src/op/fmod.cpp         |  4 +-
 src/frontends/pytorch/src/op/gcd.cpp          |  2 +-
 src/frontends/pytorch/src/op/min_max.cpp      |  8 +-
 src/frontends/pytorch/src/op/outer.cpp        |  4 +-
 src/frontends/pytorch/src/op/pow.cpp          |  2 +-
 src/frontends/pytorch/src/op/remainder.cpp    |  2 +-
 src/frontends/pytorch/src/op/rsqrt.cpp        |  4 +-
 src/frontends/pytorch/src/op/where.cpp        |  4 +-
 .../src/transforms/align_types_removal.cpp    | 60 ------------
 .../src/transforms/align_types_removal.hpp    | 24 -----
 src/frontends/pytorch/src/utils.cpp           | 95 +------------------
 src/frontends/pytorch/src/utils.hpp           |  8 +-
 20 files changed, 38 insertions(+), 256 deletions(-)
 delete mode 100644 src/frontends/pytorch/src/helper_ops/align_types.hpp
 delete mode 100644 src/frontends/pytorch/src/transforms/align_types_removal.cpp
 delete mode 100644 src/frontends/pytorch/src/transforms/align_types_removal.hpp

diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp
index 03835b72935327..bd796001364bf6 100644
--- a/src/frontends/pytorch/src/frontend.cpp
+++ b/src/frontends/pytorch/src/frontend.cpp
@@ -19,9 +19,9 @@
 #include "transformations/fp16_compression/mark_decompression_convert_constant_folding.hpp"
 #include "transformations/low_precision/mark_dequantization_subgraph.hpp"
 #include "transformations/op_conversions/convert_convertlike.hpp"
+#include "transformations/op_conversions/convert_convertpromotetypes.hpp"
 #include "transformations/resolve_names_collisions.hpp"
 #include "transforms.hpp"
-#include "transforms/align_types_removal.hpp"
 #include "transforms/append_list_unpack_replacer.hpp"
 #include "transforms/aten_cat_replacer.hpp"
 #include "transforms/aten_getitem_replacer.hpp"
@@ -185,7 +185,7 @@ void FrontEnd::normalize(const std::shared_ptr& model) const {
     manager.register_pass();
     manager.register_pass();
-    manager.register_pass();
+    manager.register_pass();
     manager.register_pass();
     manager.register_pass();
     manager.register_pass();
@@ -218,8 +218,8 @@ void FrontEnd::normalize(const std::shared_ptr& model) const {
     manager.register_pass();
     manager.register_pass();
     manager.register_pass();
-    // Second pass of AlignTypesRemoval after all converting transformations
-    manager.register_pass();
+    // Second pass of ConvertConvertPromoteTypes after all converting transformations
+    manager.register_pass();
     manager.register_pass(true);
     manager.run_passes(model);
diff --git a/src/frontends/pytorch/src/helper_ops/align_types.hpp b/src/frontends/pytorch/src/helper_ops/align_types.hpp
deleted file mode 100644
index cd69af250fa30d..00000000000000
--- a/src/frontends/pytorch/src/helper_ops/align_types.hpp
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include
-#include
-
-#include "internal_op.hpp"
-#include "openvino/frontend/decoder.hpp"
-#include "utils.hpp"
-
-namespace ov {
-namespace frontend {
-namespace pytorch {
-
-class
AlignTypes : public InternalOperation { -public: - AlignTypes(const Output& lhs, const Output& rhs, bool align_scalars) - : InternalOperation("ov::align_types", - {lhs, rhs}, - 2, - "This is internal operation for type alignment and should be removed " - "at normalization step. It can't be removed if types can't be resolved."), - m_align_scalars(align_scalars) { - validate_and_infer_types(); - } - - void validate_and_infer_types() override { - auto lhs = input_value(0); - auto rhs = input_value(1); - auto out_type = infer_types(lhs, rhs, m_align_scalars); - set_output_type(0, out_type, get_input_partial_shape(0)); - set_output_type(1, out_type, get_input_partial_shape(1)); - } - -private: - const bool m_align_scalars; -}; -} // namespace pytorch -} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/op/add.cpp b/src/frontends/pytorch/src/op/add.cpp index 8ea9782838e9ff..97cd8cbe735829 100644 --- a/src/frontends/pytorch/src/op/add.cpp +++ b/src/frontends/pytorch/src/op/add.cpp @@ -32,7 +32,7 @@ OutputVector translate_add_common(const NodeContext& context, bool inplace) { if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) rhs = context.mark_node(std::make_shared(rhs, lhs)); } else { - align_eltwise_input_types(context, lhs, rhs, true); + align_eltwise_input_types(context, lhs, rhs); } Output alpha; if (!context.input_is_none(2)) { @@ -61,4 +61,4 @@ OutputVector translate_add_(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/bitwise.cpp b/src/frontends/pytorch/src/op/bitwise.cpp index 84465502969d81..9cda0af46b2ca2 100644 --- a/src/frontends/pytorch/src/op/bitwise.cpp +++ b/src/frontends/pytorch/src/op/bitwise.cpp @@ -28,7 +28,7 @@ OutputVector translate_bitwise_and(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, false); + align_eltwise_input_types(context, x, y); auto and_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, and_x); @@ -40,7 +40,7 @@ OutputVector translate_bitwise_or(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, false); + align_eltwise_input_types(context, x, y); auto or_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, or_x); @@ -52,7 +52,7 @@ OutputVector translate_bitwise_xor(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, false); + align_eltwise_input_types(context, x, y); auto xor_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, xor_x); diff --git a/src/frontends/pytorch/src/op/cross.cpp b/src/frontends/pytorch/src/op/cross.cpp index 06392a14e0e3c7..6a724da2e93a57 100644 --- a/src/frontends/pytorch/src/op/cross.cpp +++ b/src/frontends/pytorch/src/op/cross.cpp @@ -37,7 +37,7 @@ OutputVector translate_linalg_cross(const NodeContext& context) { num_inputs_check(context, 3, 4); auto self = context.get_input(0); auto other = context.get_input(1); - align_eltwise_input_types(context, self, other, true); + align_eltwise_input_types(context, self, 
other); auto const_minus_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); Output dim; if (context.input_is_none(2)) { @@ -61,7 +61,7 @@ OutputVector translate_cross(const NodeContext& context) { num_inputs_check(context, 3, 4); auto self = context.get_input(0); auto other = context.get_input(1); - align_eltwise_input_types(context, self, other, true); + align_eltwise_input_types(context, self, other); Output dim; if (context.input_is_none(2)) { // If dim is not given, it defaults to the first dimension found with the size 3 @@ -98,4 +98,4 @@ OutputVector translate_cross(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/distance.cpp b/src/frontends/pytorch/src/op/distance.cpp index 6b67b303f4653f..bc330d07d52a04 100644 --- a/src/frontends/pytorch/src/op/distance.cpp +++ b/src/frontends/pytorch/src/op/distance.cpp @@ -33,7 +33,7 @@ Output pairwise_distance(const NodeContext& context, auto p_plus_eps = context.mark_node(std::make_shared(p, eps)); auto inv_p = context.mark_node(std::make_shared(one, p_plus_eps)); auto minus_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); auto x_y_diff = context.mark_node(std::make_shared(x, y)); auto x_y_diff_in_p_power = context.mark_node(std::make_shared(x_y_diff, p)); auto summation = context.mark_node(std::make_shared(x_y_diff_in_p_power, minus_one, keepdim)); @@ -91,4 +91,4 @@ OutputVector translate_pairwise_distance(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/div.cpp b/src/frontends/pytorch/src/op/div.cpp index 477c9ced2bb462..ea32821c0e2fe3 100644 --- a/src/frontends/pytorch/src/op/div.cpp +++ b/src/frontends/pytorch/src/op/div.cpp @@ -37,7 +37,7 @@ OutputVector translate_div_common(const NodeContext& context, if (x.get_element_type().is_dynamic() || x.get_element_type() != y.get_element_type()) y = context.mark_node(std::make_shared(y, x)); } else { - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); } auto res = context.mark_node(std::make_shared(x, y, true)); // TODO: ticket 103296; Temporarily disable ConvertDivide transformation diff --git a/src/frontends/pytorch/src/op/floor_divide.cpp b/src/frontends/pytorch/src/op/floor_divide.cpp index 8e9eb8a44f60b8..5c0af45c57598c 100644 --- a/src/frontends/pytorch/src/op/floor_divide.cpp +++ b/src/frontends/pytorch/src/op/floor_divide.cpp @@ -19,7 +19,7 @@ OutputVector translate_floor_divide(const NodeContext& context) { num_inputs_check(context, 2, 2); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); auto div = context.mark_node(std::make_shared(x, y, true)); return {context.mark_node(std::make_shared(div))}; }; @@ -27,4 +27,4 @@ OutputVector translate_floor_divide(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/fmod.cpp b/src/frontends/pytorch/src/op/fmod.cpp index 48d031e647d3d6..517caa36887839 100644 --- a/src/frontends/pytorch/src/op/fmod.cpp +++ 
b/src/frontends/pytorch/src/op/fmod.cpp @@ -16,7 +16,7 @@ OutputVector translate_fmod(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); auto res = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { @@ -28,4 +28,4 @@ OutputVector translate_fmod(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/gcd.cpp b/src/frontends/pytorch/src/op/gcd.cpp index 70301185afcb7a..dae9ae12033233 100644 --- a/src/frontends/pytorch/src/op/gcd.cpp +++ b/src/frontends/pytorch/src/op/gcd.cpp @@ -25,7 +25,7 @@ OutputVector translate_gcd(const NodeContext& context) { num_inputs_check(context, 2, 2); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); auto zero_i32 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); auto trip_count = std::make_shared(element::i32, Shape{}, 1000); diff --git a/src/frontends/pytorch/src/op/min_max.cpp b/src/frontends/pytorch/src/op/min_max.cpp index 928b963f074b31..8600907afc648d 100644 --- a/src/frontends/pytorch/src/op/min_max.cpp +++ b/src/frontends/pytorch/src/op/min_max.cpp @@ -33,7 +33,7 @@ OutputVector translate_max(const NodeContext& context) { // torch.max(input, other) if (context.input_is_none(2)) { auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); return {context.mark_node(std::make_shared(x, y))}; } // torch.max(input, dim, keepdim), returns values and indicies @@ -64,7 +64,7 @@ OutputVector translate_min(const NodeContext& context) { // torch.min(input, other) if (context.input_is_none(2)) { auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); return {context.mark_node(std::make_shared(x, y))}; } // torch.min(input, dim, keepdim), returns values and indicies @@ -90,7 +90,7 @@ OutputVector translate_maximum(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); auto res = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, res); @@ -106,7 +106,7 @@ OutputVector translate_minimum(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); auto res = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, res); diff --git a/src/frontends/pytorch/src/op/outer.cpp b/src/frontends/pytorch/src/op/outer.cpp index f1d151ffa9ebde..e9af9e98d52e84 100644 --- a/src/frontends/pytorch/src/op/outer.cpp +++ b/src/frontends/pytorch/src/op/outer.cpp @@ -22,7 +22,7 @@ OutputVector translate_outer(const NodeContext& context) { num_inputs_check(context, 2, 3); auto vec1 = context.get_input(0); auto vec2 = context.get_input(1); - align_eltwise_input_types(context, vec1, vec2, true); + align_eltwise_input_types(context, vec1, vec2); auto const_zero = 
context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); auto const_minus_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); vec1 = context.mark_node(std::make_shared(vec1, const_minus_one)); @@ -38,4 +38,4 @@ OutputVector translate_outer(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/pow.cpp b/src/frontends/pytorch/src/op/pow.cpp index ebdfb8c57155e0..572faa0044b934 100644 --- a/src/frontends/pytorch/src/op/pow.cpp +++ b/src/frontends/pytorch/src/op/pow.cpp @@ -19,7 +19,7 @@ OutputVector translate_pow(const NodeContext& context) { if (inplace) { rhs = std::make_shared(rhs, lhs); } else { - align_eltwise_input_types(context, lhs, rhs, true); + align_eltwise_input_types(context, lhs, rhs); } auto res = context.mark_node(std::make_shared(lhs, rhs)); if (inplace) { diff --git a/src/frontends/pytorch/src/op/remainder.cpp b/src/frontends/pytorch/src/op/remainder.cpp index ce9e496e9770b2..384d86ca9043f8 100644 --- a/src/frontends/pytorch/src/op/remainder.cpp +++ b/src/frontends/pytorch/src/op/remainder.cpp @@ -17,7 +17,7 @@ OutputVector translate_remainder(const NodeContext& context) { num_inputs_check(context, 2, 2); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); return {context.mark_node(std::make_shared(x, y))}; }; diff --git a/src/frontends/pytorch/src/op/rsqrt.cpp b/src/frontends/pytorch/src/op/rsqrt.cpp index b17f66bb9f572a..4a7d4b3ca6630e 100644 --- a/src/frontends/pytorch/src/op/rsqrt.cpp +++ b/src/frontends/pytorch/src/op/rsqrt.cpp @@ -21,7 +21,7 @@ OutputVector translate_rsqrt(const NodeContext& context) { auto data = context.get_input(0); auto one_const = context.mark_node(v0::Constant::create(element::f32, Shape({}), {1})); Output fake_const_for_type = context.mark_node(v0::Constant::create(element::f32, Shape({}), {.5})); - align_eltwise_input_types(context, data, fake_const_for_type, true); + align_eltwise_input_types(context, data, fake_const_for_type); auto one_const_casted = context.mark_node(std::make_shared(one_const, data)); auto sqrt_data = context.mark_node(std::make_shared(data)); return {context.mark_node(std::make_shared(one_const_casted, sqrt_data))}; @@ -30,4 +30,4 @@ OutputVector translate_rsqrt(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/where.cpp b/src/frontends/pytorch/src/op/where.cpp index 3c8bf86d84adda..af19c25514a29b 100644 --- a/src/frontends/pytorch/src/op/where.cpp +++ b/src/frontends/pytorch/src/op/where.cpp @@ -21,11 +21,11 @@ OutputVector translate_where(const NodeContext& context) { auto bool_cond = context.mark_node(std::make_shared(cond, element::boolean)); auto x = context.get_input(1); auto y = context.get_input(2); - align_eltwise_input_types(context, x, y, true); + align_eltwise_input_types(context, x, y); return {context.mark_node(std::make_shared(bool_cond, x, y))}; }; } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/align_types_removal.cpp b/src/frontends/pytorch/src/transforms/align_types_removal.cpp deleted file mode 100644 index 
c5e43d8af13004..00000000000000 --- a/src/frontends/pytorch/src/transforms/align_types_removal.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "align_types_removal.hpp" - -#include -#include - -#include "helper_ops/align_types.hpp" -#include "openvino/core/rt_info.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/split.hpp" -#include "openvino/op/squeeze.hpp" -#include "openvino/op/util/framework_node.hpp" -#include "openvino/pass/pattern/matcher.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" -#include "utils.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -using namespace ov::op; - -AlignTypesRemoval::AlignTypesRemoval() { - auto align_types_pattern = ov::pass::pattern::wrap_type(); - - ov::matcher_pass_callback callback = [](ov::pass::pattern::Matcher& m) { - auto align_types = std::dynamic_pointer_cast(m.get_match_root()); - if (!align_types) - return false; - auto lhs_itype = align_types->get_input_element_type(0); - auto rhs_itype = align_types->get_input_element_type(1); - auto lhs_otype = align_types->get_output_element_type(0); - auto rhs_otype = align_types->get_output_element_type(1); - if (lhs_otype.is_static() && rhs_otype.is_static()) { - auto out1 = align_types->input_value(0); - auto out2 = align_types->input_value(1); - if (lhs_itype != lhs_otype) - out1 = std::make_shared(align_types->input_value(0), lhs_otype); - if (rhs_itype != rhs_otype) - out2 = std::make_shared(align_types->input_value(1), rhs_otype); - align_types->output(0).replace(out1); - align_types->output(1).replace(out2); - return true; - } - return false; - }; - - auto m = std::make_shared(align_types_pattern, - "ov::frontend::pytorch::pass::AlignTypesRemoval"); - this->register_matcher(m, callback); -}; - -} // namespace pass -} // namespace pytorch -} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/align_types_removal.hpp b/src/frontends/pytorch/src/transforms/align_types_removal.hpp deleted file mode 100644 index bba81df9e0e086..00000000000000 --- a/src/frontends/pytorch/src/transforms/align_types_removal.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/pass/graph_rewrite.hpp" -#include "openvino/pass/pass.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -class AlignTypesRemoval : public ov::pass::MatcherPass { -public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::AlignTypesRemoval"); - AlignTypesRemoval(); -}; - -} // namespace pass -} // namespace pytorch -} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 32f62eed603d47..7e1fd7684f42e2 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -4,10 +4,10 @@ #include "utils.hpp" -#include "helper_ops/align_types.hpp" #include "op_table.hpp" #include "openvino/core/rt_info.hpp" #include "openvino/frontend/pytorch/decoder.hpp" +#include "openvino/op/convert_promote_types.hpp" #include "openvino/opsets/opset10.hpp" #include "openvino/util/log.hpp" #include "pt_framework_node.hpp" @@ -399,100 +399,13 @@ std::unordered_map bit_to_int{ }; } // namespace -element::Type infer_types(const Output& lhs, const Output& rhs, bool align_scalars) { - const auto& lhs_type = lhs.get_element_type(); - const auto& 
rhs_type = rhs.get_element_type(); - if (lhs_type.is_dynamic() || rhs_type.is_dynamic()) { - return element::dynamic; - } - - // Both types are static, align types. If float and int types are used convert int type to f32, after that align - // to the largest bitness, if both float or both int, just align bitness - if (lhs_type == rhs_type) - return lhs_type; - - // if one of operands is scalar, the resulting type is taken from the other operand except when scalar is float - // type and other operand is int, in that case BOTH operands get fp32 type - const auto& lhs_rank = lhs.get_partial_shape().rank(); - const auto& rhs_rank = rhs.get_partial_shape().rank(); - // consider dynamic rank as non scalar - const auto is_lhs_scalar = lhs_rank.is_static() && lhs_rank.get_length() == 0; - const auto is_rhs_scalar = rhs_rank.is_static() && rhs_rank.get_length() == 0; - auto lhs_dst_type = lhs_type; - auto rhs_dst_type = rhs_type; - if (is_lhs_scalar && lhs_type.is_real() && !rhs_type.is_real()) { - // if div we need to also align float types to highest bitness regardless of scalar - if (!align_scalars) - lhs_dst_type = element::f32; - rhs_dst_type = lhs_type; - } else if (is_rhs_scalar && !lhs_type.is_real() && rhs_type.is_real()) { - lhs_dst_type = rhs_type; - // if div we need to also align float types to highest bitness regardless of scalar - if (!align_scalars) - rhs_dst_type = element::f32; - } else if (is_lhs_scalar && rhs_type != element::boolean) { - return rhs_type; - } else if (is_rhs_scalar && lhs_type != element::boolean) { - return lhs_type; - } - - if (!lhs_dst_type.is_real() && rhs_dst_type.is_real()) { - lhs_dst_type = rhs_dst_type; - } else if (lhs_dst_type.is_real() && !rhs_dst_type.is_real()) { - rhs_dst_type = lhs_dst_type; - } - // Align bool to other type - if (lhs_dst_type == element::boolean) { - lhs_dst_type = rhs_dst_type; - } else if (rhs_dst_type == element::boolean) { - rhs_dst_type = lhs_dst_type; - } - // At this point we either have both floating point type or both integer type. 
Align bitness to higher - if (lhs_dst_type != rhs_dst_type) { - auto dst_bitness = std::max(lhs_dst_type.bitwidth(), rhs_dst_type.bitwidth()); - // If integer type are mixed signed+unsigned align to next bitness - if (dst_bitness < 64 && lhs_dst_type.is_integral() && lhs_dst_type.is_integral() && - lhs_dst_type.bitwidth() == rhs_dst_type.bitwidth() && lhs_dst_type != rhs_dst_type) { - dst_bitness *= 2; - } - if (lhs_dst_type.bitwidth() != dst_bitness) { - if (lhs_dst_type.is_real()) { - lhs_dst_type = bit_to_float.at(dst_bitness); - } else { - lhs_dst_type = bit_to_int.at(dst_bitness); - } - } - if (rhs_dst_type.bitwidth() != dst_bitness) { - if (rhs_dst_type.is_real()) { - rhs_dst_type = bit_to_float.at(dst_bitness); - } else { - rhs_dst_type = bit_to_int.at(dst_bitness); - } - } - } - return lhs_dst_type; -} - -void align_eltwise_input_types(const NodeContext& context, Output& lhs, Output& rhs, bool align_scalars) { +void align_eltwise_input_types(const NodeContext& context, Output& lhs, Output& rhs) { const auto& lhs_type = lhs.get_element_type(); const auto& rhs_type = rhs.get_element_type(); auto out_type = context.get_output_type(0); - if (out_type.is()) { - auto otype = out_type.as(); - if (otype.is_real()) { - if (otype != lhs_type) { - lhs = context.mark_node(std::make_shared(lhs, otype)); - } - if (otype != rhs_type) { - rhs = context.mark_node(std::make_shared(rhs, otype)); - } - return; - } - } - auto dst_type = infer_types(lhs, rhs, align_scalars); + auto at = std::make_shared(lhs, rhs, true, true, element::f32); + auto dst_type = at->get_output_element_type(0); if (dst_type.is_dynamic()) { - // We can't decide the type at this point, create a special operation - auto at = std::make_shared(lhs, rhs, align_scalars); lhs = at->output(0); rhs = at->output(1); return; diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index f7387bd6adaa61..c5bbdae559f770 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -88,11 +88,7 @@ Any simplified_type_interpret(Any type); void add_exception_to_fw_node(std::shared_ptr node, const std::string& msg); -element::Type infer_types(const Output& lhs, const Output& rhs, bool align_scalars); -void align_eltwise_input_types(const NodeContext& context, - Output& lhs, - Output& rhs, - bool align_scalars = false); +void align_eltwise_input_types(const NodeContext& context, Output& lhs, Output& rhs); void align_output_types(const NodeContext& context, OutputVector& outputs); std::deque> get_list_as_outputs(const Output& start); @@ -159,7 +155,7 @@ OutputVector translate_1to1_match_2_inputs_align_types(const NodeContext& contex // If type is string or None, we shouldn't align if (!lhs_type.is() && !rhs_type.is() && !lhs_type.is() && !rhs_type.is()) - align_eltwise_input_types(context, lhs, rhs, true); + align_eltwise_input_types(context, lhs, rhs); OutputVector res = {context.mark_node(std::make_shared(lhs, rhs))}; align_output_types(context, res); return res; From 9805aa22b35728c4f7b68881aea93c013cb4abef Mon Sep 17 00:00:00 2001 From: Mateusz Date: Thu, 15 Feb 2024 11:48:52 +0100 Subject: [PATCH 02/25] PT FE support PyScalar type promotion --- .../openvino/frontend/pytorch/ts_decoder.py | 4 +- .../src/openvino/frontend/pytorch/utils.py | 2 +- .../pyopenvino/frontend/pytorch/decoder.cpp | 2 + .../python/src/pyopenvino/utils/utils.cpp | 4 + .../include/openvino/frontend/decoder.hpp | 6 + src/frontends/pytorch/src/op/add.cpp | 6 +- src/frontends/pytorch/src/op/bitwise.cpp | 6 +- 
src/frontends/pytorch/src/op/cross.cpp | 12 +- src/frontends/pytorch/src/op/div.cpp | 6 +- src/frontends/pytorch/src/op/floor_divide.cpp | 2 +- src/frontends/pytorch/src/op/fmod.cpp | 2 +- src/frontends/pytorch/src/op/gcd.cpp | 2 +- src/frontends/pytorch/src/op/min_max.cpp | 16 ++- src/frontends/pytorch/src/op/outer.cpp | 6 +- src/frontends/pytorch/src/op/pow.cpp | 6 +- src/frontends/pytorch/src/op/remainder.cpp | 2 +- src/frontends/pytorch/src/op/rsub.cpp | 12 +- src/frontends/pytorch/src/op/sub.cpp | 10 +- src/frontends/pytorch/src/op/where.cpp | 2 +- src/frontends/pytorch/src/utils.cpp | 53 ++++--- src/frontends/pytorch/src/utils.hpp | 14 +- .../py_frontend_tests/test_torch_frontend.py | 136 ++++++++++++++++-- 22 files changed, 250 insertions(+), 61 deletions(-) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index 4c8356b59228ba..cae1db1531c61a 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -185,7 +185,9 @@ def _get_known_type_for_value(self, pt_type): if pt_type is None: return OVAny(OVType.dynamic) # TODO: Don't use str, use native types - if str(pt_type) in pt_to_ov_type_map: + if str(pt_type) in ["int", "float"]: + return OVAny(DecoderType.PyScalar(OVAny(pt_to_ov_type_map[str(pt_type)]))) + elif str(pt_type) in pt_to_ov_type_map: return OVAny(pt_to_ov_type_map[str(pt_type)]) elif isinstance(pt_type, torch.TensorType): # Tensor type, parse element type diff --git a/src/bindings/python/src/openvino/frontend/pytorch/utils.py b/src/bindings/python/src/openvino/frontend/pytorch/utils.py index 157592f3aabee0..33f6e81683f40c 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/utils.py @@ -123,7 +123,7 @@ def graph_has_ops(graph, op_types:list) -> bool: pt_to_ov_type_map = { "float": OVType.f32, - "int": OVType.i32, + "int": OVType.i64, "bool": OVType.boolean, "torch.bfloat16": OVType.bf16, "torch.float16": OVType.f16, diff --git a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp index 260c0ba69c3a0f..a26afd2655edb2 100644 --- a/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/pytorch/decoder.cpp @@ -32,4 +32,6 @@ void regclass_frontend_pytorch_decoder(py::module m) { def(py::init<>()); py::class_(type_module, "PyNone"). def(py::init<>()); + py::class_(type_module, "PyScalar"). 
+ def(py::init()); } diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp index 52f7099530523f..8f536045a9cf73 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.cpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -205,6 +205,8 @@ py::object from_ov_any(const ov::Any& any) { return py::cast(any.as()); } else if (any.is()) { return py::cast(any.as()); + } else if (any.is()) { + return py::cast(any.as()); } else { PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!"); return py::cast((PyObject*)NULL); @@ -394,6 +396,8 @@ ov::Any py_object_to_any(const py::object& py_obj) { return py::cast(py_obj); } else if (py::isinstance(py_obj)) { return py::cast(py_obj); + } else if (py::isinstance(py_obj)) { + return py::cast(py_obj); // If there is no match fallback to py::object } else if (py::isinstance(py_obj)) { return py_obj; diff --git a/src/frontends/common/include/openvino/frontend/decoder.hpp b/src/frontends/common/include/openvino/frontend/decoder.hpp index ecf06f0dbf6c5a..d5dad9c848810d 100644 --- a/src/frontends/common/include/openvino/frontend/decoder.hpp +++ b/src/frontends/common/include/openvino/frontend/decoder.hpp @@ -35,6 +35,12 @@ struct Str {}; struct PyNone {}; +struct PyScalar { + PyScalar() = default; + explicit PyScalar(const Any& _element_type) : element_type(_element_type) {} + Any element_type; +}; + struct Optional; struct Dict; struct NamedTuple; diff --git a/src/frontends/pytorch/src/op/add.cpp b/src/frontends/pytorch/src/op/add.cpp index 97cd8cbe735829..6a73e86e1db084 100644 --- a/src/frontends/pytorch/src/op/add.cpp +++ b/src/frontends/pytorch/src/op/add.cpp @@ -32,7 +32,11 @@ OutputVector translate_add_common(const NodeContext& context, bool inplace) { if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) rhs = context.mark_node(std::make_shared(rhs, lhs)); } else { - align_eltwise_input_types(context, lhs, rhs); + align_eltwise_input_types(context, + lhs, + rhs, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); } Output alpha; if (!context.input_is_none(2)) { diff --git a/src/frontends/pytorch/src/op/bitwise.cpp b/src/frontends/pytorch/src/op/bitwise.cpp index 9cda0af46b2ca2..89d7f4c3de94f6 100644 --- a/src/frontends/pytorch/src/op/bitwise.cpp +++ b/src/frontends/pytorch/src/op/bitwise.cpp @@ -28,7 +28,7 @@ OutputVector translate_bitwise_and(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto and_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, and_x); @@ -40,7 +40,7 @@ OutputVector translate_bitwise_or(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto or_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, or_x); @@ -52,7 +52,7 @@ OutputVector translate_bitwise_xor(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - 
align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto xor_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, xor_x); diff --git a/src/frontends/pytorch/src/op/cross.cpp b/src/frontends/pytorch/src/op/cross.cpp index 6a724da2e93a57..5a33badcc905fa 100644 --- a/src/frontends/pytorch/src/op/cross.cpp +++ b/src/frontends/pytorch/src/op/cross.cpp @@ -37,7 +37,11 @@ OutputVector translate_linalg_cross(const NodeContext& context) { num_inputs_check(context, 3, 4); auto self = context.get_input(0); auto other = context.get_input(1); - align_eltwise_input_types(context, self, other); + align_eltwise_input_types(context, + self, + other, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); auto const_minus_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); Output dim; if (context.input_is_none(2)) { @@ -61,7 +65,11 @@ OutputVector translate_cross(const NodeContext& context) { num_inputs_check(context, 3, 4); auto self = context.get_input(0); auto other = context.get_input(1); - align_eltwise_input_types(context, self, other); + align_eltwise_input_types(context, + self, + other, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); Output dim; if (context.input_is_none(2)) { // If dim is not given, it defaults to the first dimension found with the size 3 diff --git a/src/frontends/pytorch/src/op/div.cpp b/src/frontends/pytorch/src/op/div.cpp index ea32821c0e2fe3..9066c65714782b 100644 --- a/src/frontends/pytorch/src/op/div.cpp +++ b/src/frontends/pytorch/src/op/div.cpp @@ -37,7 +37,11 @@ OutputVector translate_div_common(const NodeContext& context, if (x.get_element_type().is_dynamic() || x.get_element_type() != y.get_element_type()) y = context.mark_node(std::make_shared(y, x)); } else { - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, + x, + y, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); } auto res = context.mark_node(std::make_shared(x, y, true)); // TODO: ticket 103296; Temporarily disable ConvertDivide transformation diff --git a/src/frontends/pytorch/src/op/floor_divide.cpp b/src/frontends/pytorch/src/op/floor_divide.cpp index 5c0af45c57598c..7864e72fbfcb7a 100644 --- a/src/frontends/pytorch/src/op/floor_divide.cpp +++ b/src/frontends/pytorch/src/op/floor_divide.cpp @@ -19,7 +19,7 @@ OutputVector translate_floor_divide(const NodeContext& context) { num_inputs_check(context, 2, 2); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto div = context.mark_node(std::make_shared(x, y, true)); return {context.mark_node(std::make_shared(div))}; }; diff --git a/src/frontends/pytorch/src/op/fmod.cpp b/src/frontends/pytorch/src/op/fmod.cpp index 517caa36887839..f60d2cd64b6f1b 100644 --- a/src/frontends/pytorch/src/op/fmod.cpp +++ b/src/frontends/pytorch/src/op/fmod.cpp @@ -16,7 +16,7 @@ OutputVector translate_fmod(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto res = context.mark_node(std::make_shared(x, 
y)); if (!context.input_is_none(2)) { diff --git a/src/frontends/pytorch/src/op/gcd.cpp b/src/frontends/pytorch/src/op/gcd.cpp index dae9ae12033233..34b43b87512e11 100644 --- a/src/frontends/pytorch/src/op/gcd.cpp +++ b/src/frontends/pytorch/src/op/gcd.cpp @@ -25,7 +25,7 @@ OutputVector translate_gcd(const NodeContext& context) { num_inputs_check(context, 2, 2); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto zero_i32 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); auto trip_count = std::make_shared(element::i32, Shape{}, 1000); diff --git a/src/frontends/pytorch/src/op/min_max.cpp b/src/frontends/pytorch/src/op/min_max.cpp index 8600907afc648d..004e9a4df01364 100644 --- a/src/frontends/pytorch/src/op/min_max.cpp +++ b/src/frontends/pytorch/src/op/min_max.cpp @@ -33,7 +33,11 @@ OutputVector translate_max(const NodeContext& context) { // torch.max(input, other) if (context.input_is_none(2)) { auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, + x, + y, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); return {context.mark_node(std::make_shared(x, y))}; } // torch.max(input, dim, keepdim), returns values and indicies @@ -64,7 +68,11 @@ OutputVector translate_min(const NodeContext& context) { // torch.min(input, other) if (context.input_is_none(2)) { auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, + x, + y, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); return {context.mark_node(std::make_shared(x, y))}; } // torch.min(input, dim, keepdim), returns values and indicies @@ -90,7 +98,7 @@ OutputVector translate_maximum(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto res = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, res); @@ -106,7 +114,7 @@ OutputVector translate_minimum(const NodeContext& context) { num_inputs_check(context, 2, 3); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto res = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, res); diff --git a/src/frontends/pytorch/src/op/outer.cpp b/src/frontends/pytorch/src/op/outer.cpp index e9af9e98d52e84..2f9db3e9f02cc0 100644 --- a/src/frontends/pytorch/src/op/outer.cpp +++ b/src/frontends/pytorch/src/op/outer.cpp @@ -22,7 +22,11 @@ OutputVector translate_outer(const NodeContext& context) { num_inputs_check(context, 2, 3); auto vec1 = context.get_input(0); auto vec2 = context.get_input(1); - align_eltwise_input_types(context, vec1, vec2); + align_eltwise_input_types(context, + vec1, + vec2, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); auto const_zero = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); auto const_minus_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); vec1 = 
context.mark_node(std::make_shared(vec1, const_minus_one)); diff --git a/src/frontends/pytorch/src/op/pow.cpp b/src/frontends/pytorch/src/op/pow.cpp index 572faa0044b934..4b9335489b5345 100644 --- a/src/frontends/pytorch/src/op/pow.cpp +++ b/src/frontends/pytorch/src/op/pow.cpp @@ -19,7 +19,11 @@ OutputVector translate_pow(const NodeContext& context) { if (inplace) { rhs = std::make_shared(rhs, lhs); } else { - align_eltwise_input_types(context, lhs, rhs); + align_eltwise_input_types(context, + lhs, + rhs, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); } auto res = context.mark_node(std::make_shared(lhs, rhs)); if (inplace) { diff --git a/src/frontends/pytorch/src/op/remainder.cpp b/src/frontends/pytorch/src/op/remainder.cpp index 384d86ca9043f8..7c3fc389df24e0 100644 --- a/src/frontends/pytorch/src/op/remainder.cpp +++ b/src/frontends/pytorch/src/op/remainder.cpp @@ -17,7 +17,7 @@ OutputVector translate_remainder(const NodeContext& context) { num_inputs_check(context, 2, 2); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); return {context.mark_node(std::make_shared(x, y))}; }; diff --git a/src/frontends/pytorch/src/op/rsub.cpp b/src/frontends/pytorch/src/op/rsub.cpp index 30c9c25698229d..cad75f7740e740 100644 --- a/src/frontends/pytorch/src/op/rsub.cpp +++ b/src/frontends/pytorch/src/op/rsub.cpp @@ -21,13 +21,21 @@ OutputVector translate_rsub(const NodeContext& context) { auto other = context.get_input(1); if (!context.input_is_none(2)) { auto alpha = context.get_input(2); - align_eltwise_input_types(context, self, other); + align_eltwise_input_types(context, + self, + other, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); // reverse aten::sub other - self * alpha auto alpha_casted = context.mark_node(std::make_shared(alpha, self)); auto alpha_mul = context.mark_node(std::make_shared(self, alpha_casted)); return {context.mark_node(std::make_shared(other, alpha_mul))}; } - align_eltwise_input_types(context, self, other); + align_eltwise_input_types(context, + self, + other, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); return {context.mark_node(std::make_shared(other, self))}; }; diff --git a/src/frontends/pytorch/src/op/sub.cpp b/src/frontends/pytorch/src/op/sub.cpp index 69148f23329d4d..e7d048a0055da8 100644 --- a/src/frontends/pytorch/src/op/sub.cpp +++ b/src/frontends/pytorch/src/op/sub.cpp @@ -23,7 +23,11 @@ OutputVector translate_sub_common(const NodeContext& context, bool inplace) { if (x.get_element_type().is_dynamic() || x.get_element_type() != y.get_element_type()) y = context.mark_node(std::make_shared(y, x)); } else { - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, + x, + y, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); } // default alpha is 1 so no need to multiply if alpha is not provided if (!context.input_is_none(2)) { @@ -49,7 +53,7 @@ OutputVector translate_sub_fx(const NodeContext& context) { num_inputs_check(context, 2, 2); auto x = context.get_input(0); auto y = context.get_input(1); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); // default alpha is 1 so no need to multiply if alpha is not provided if (context.has_attribute("alpha")) { 
auto alpha = context.get_attribute>("alpha"); @@ -62,4 +66,4 @@ OutputVector translate_sub_fx(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/where.cpp b/src/frontends/pytorch/src/op/where.cpp index af19c25514a29b..db51f04d0e4edb 100644 --- a/src/frontends/pytorch/src/op/where.cpp +++ b/src/frontends/pytorch/src/op/where.cpp @@ -21,7 +21,7 @@ OutputVector translate_where(const NodeContext& context) { auto bool_cond = context.mark_node(std::make_shared(cond, element::boolean)); auto x = context.get_input(1); auto y = context.get_input(2); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 1), is_python_scalar_input(context, 2)); return {context.mark_node(std::make_shared(bool_cond, x, y))}; }; diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 7e1fd7684f42e2..98ebc2f3eafe71 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -384,39 +384,46 @@ Any simplified_type_interpret(Any type) { return type; } -namespace { -std::unordered_map bit_to_float{ - {16, element::f16}, - {32, element::f32}, - {64, element::f64}, -}; -std::unordered_map bit_to_int{ - // {4, element::i4}, torch don't have int4 - {8, element::i8}, - {16, element::i16}, - {32, element::i32}, - {64, element::i64}, -}; -} // namespace +bool is_python_scalar_input(const NodeContext& context, size_t index) { + return context.get_input_type(index).is(); +} -void align_eltwise_input_types(const NodeContext& context, Output& lhs, Output& rhs) { +void align_eltwise_input_types(const NodeContext& context, + Output& lhs, + Output& rhs, + const bool& is_lhs_python_scalar, + const bool& ir_rhs_python_scalar) { const auto& lhs_type = lhs.get_element_type(); const auto& rhs_type = rhs.get_element_type(); + auto const_0 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); auto out_type = context.get_output_type(0); - auto at = std::make_shared(lhs, rhs, true, true, element::f32); + if (is_lhs_python_scalar && !ir_rhs_python_scalar) { + rhs = context.mark_node(std::make_shared(rhs, const_0)); + } else if (!is_lhs_python_scalar && ir_rhs_python_scalar) { + lhs = context.mark_node(std::make_shared(lhs, const_0)); + } + + auto at = context.mark_node(std::make_shared(lhs, rhs, true, true, element::f32)); auto dst_type = at->get_output_element_type(0); if (dst_type.is_dynamic()) { lhs = at->output(0); rhs = at->output(1); - return; - } - // Cast to destination type - if (dst_type != lhs_type) { - lhs = context.mark_node(std::make_shared(lhs, dst_type)); + } else { + // Cast to destination type + if (dst_type != lhs_type) { + lhs = context.mark_node(std::make_shared(lhs, dst_type)); + } + if (dst_type != rhs_type) { + rhs = context.mark_node(std::make_shared(rhs, dst_type)); + } } - if (dst_type != rhs_type) { - rhs = context.mark_node(std::make_shared(rhs, dst_type)); + + if (is_lhs_python_scalar && !ir_rhs_python_scalar) { + rhs = context.mark_node(std::make_shared(rhs, const_0)); + } else if (!is_lhs_python_scalar && ir_rhs_python_scalar) { + lhs = context.mark_node(std::make_shared(lhs, const_0)); } + return; } void align_output_types(const NodeContext& context, OutputVector& outputs) { diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index c5bbdae559f770..12f50e39f17a99 100644 --- 
a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -88,7 +88,13 @@ Any simplified_type_interpret(Any type); void add_exception_to_fw_node(std::shared_ptr node, const std::string& msg); -void align_eltwise_input_types(const NodeContext& context, Output& lhs, Output& rhs); +bool is_python_scalar_input(const NodeContext& context, size_t index); + +void align_eltwise_input_types(const NodeContext& context, + Output& lhs, + Output& rhs, + const bool& is_lhs_python_scalar = false, + const bool& ir_rhs_python_scalar = false); void align_output_types(const NodeContext& context, OutputVector& outputs); std::deque> get_list_as_outputs(const Output& start); @@ -155,7 +161,11 @@ OutputVector translate_1to1_match_2_inputs_align_types(const NodeContext& contex // If type is string or None, we shouldn't align if (!lhs_type.is() && !rhs_type.is() && !lhs_type.is() && !rhs_type.is()) - align_eltwise_input_types(context, lhs, rhs); + align_eltwise_input_types(context, + lhs, + rhs, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); OutputVector res = {context.mark_node(std::make_shared(lhs, rhs))}; align_output_types(context, res); return res; diff --git a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py index 338825f27aa451..9303f0c590b59b 100644 --- a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py +++ b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py @@ -2,17 +2,20 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import torch -import numpy as np -from openvino.frontend import FrontEndManager, ConversionExtension, NodeContext, OpExtension -from openvino.runtime import PartialShape, Type -import openvino.runtime.opset10 as ops - -from pathlib import Path import glob -import re -import os +import itertools import math +import os +import re +from pathlib import Path + +import numpy as np +import openvino.runtime.opset10 as ops +import pytest +import torch +from openvino.frontend import (ConversionExtension, FrontEndManager, + NodeContext, OpExtension) +from openvino.runtime import PartialShape, Type class aten_relu(torch.nn.Module): @@ -298,9 +301,8 @@ def add_ext(front_end, stat): def test_state_dict_names(): - from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder - import torchvision + from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder model = torch.hub.load("pytorch/vision", "resnet18", weights="DEFAULT") decoder = TorchScriptPythonDecoder( model, example_input=(torch.randn(1, 3, 224, 224),)) @@ -356,3 +358,115 @@ def test_shared_consts_reused(): const_names.remove(name) assert len(n.output(0).get_target_inputs()) == 2, f"Constant {n} is not reused" assert len(const_names) == 0, f"Not all constants were found: {const_names}" + + +@pytest.mark.parametrize( + ("l_type", "r_type"), + itertools.product( + [ + float, + int, + torch.bool, + torch.uint8, + torch.int8, + torch.int16, + torch.int32, + torch.int64, + torch.bfloat16, + torch.float16, + torch.float32, + torch.float64, + ], + repeat=2, + ), +) +@pytest.mark.parametrize("l_scalar", [True, False]) +@pytest.mark.parametrize("r_scalar", [True, False]) +def test_pytorch_types_promotion(l_type, r_type, l_scalar, r_scalar): + if l_type == r_type == torch.bool: + pytest.skip("Add does not support both inputs being bool.") + from openvino.frontend.pytorch.ts_decoder import (TorchScriptPythonDecoder, + pt_to_ov_type_map) + + # from 
openvino.frontend.pytorch.utils import pt_to_ov_type_map + class aten_add_t_t(torch.nn.Module): + def forward(self, x: torch.Tensor, y: torch.Tensor): + return torch.add(x, y) + + class aten_add_int_int(torch.nn.Module): + def forward(self, x: int, y: int): + return torch.add(x, y) + + class aten_add_float_float(torch.nn.Module): + def forward(self, x: float, y: float): + return torch.add(x, y) + + class aten_add_int_float(torch.nn.Module): + def forward(self, x: int, y: float): + return torch.add(x, y) + + class aten_add_float_int(torch.nn.Module): + def forward(self, x: float, y: int): + return torch.add(x, y) + + class aten_add_t_int(torch.nn.Module): + def forward(self, x: torch.Tensor, y: int): + return torch.add(x, y) + + class aten_add_int_t(torch.nn.Module): + def forward(self, x: int, y: torch.Tensor): + return torch.add(x, y) + + class aten_add_t_float(torch.nn.Module): + def forward(self, x: torch.Tensor, y: float): + return torch.add(x, y) + + class aten_add_float_t(torch.nn.Module): + def forward(self, x: float, y: torch.Tensor): + return torch.add(x, y) + + l_t = "t" + r_t = "t" + + if isinstance(l_type, type): + ov_lhs = ops.parameter(PartialShape([]), pt_to_ov_type_map.get(l_type.__name__)) + pt_lhs = l_type(5) + l_t = l_type.__name__ + elif l_scalar: + ov_lhs = ops.parameter(PartialShape([]), pt_to_ov_type_map.get(str(l_type))) + pt_lhs = torch.tensor(1, dtype=l_type) + else: + ov_lhs = ops.parameter(PartialShape([2, 2]), pt_to_ov_type_map.get(str(l_type))) + pt_lhs = torch.rand([2, 2]).to(dtype=l_type) + + if isinstance(r_type, type): + ov_rhs = ops.parameter(PartialShape([]), pt_to_ov_type_map.get(r_type.__name__)) + pt_rhs = r_type(5) + r_t = r_type.__name__ + elif r_scalar: + ov_rhs = ops.parameter(PartialShape([]), pt_to_ov_type_map.get(str(r_type))) + pt_rhs = torch.tensor(1, dtype=r_type) + else: + ov_rhs = ops.parameter(PartialShape([2, 2]), pt_to_ov_type_map.get(str(r_type))) + pt_rhs = torch.rand([2, 2]).to(dtype=r_type) + model = get_scripted_model(locals().get(f"aten_add_{l_t}_{r_t}")()) + decoder = TorchScriptPythonDecoder(model) + fe_manager = FrontEndManager() + fe = fe_manager.load_by_framework("pytorch") + im = fe.load(decoder) + lhs_place = im.get_place_by_tensor_name("x.1") + rhs_place = im.get_place_by_tensor_name("y.1") + im.set_element_type(lhs_place, ov_lhs.get_output_element_type(0)) + im.set_element_type(rhs_place, ov_rhs.get_output_element_type(0)) + im.set_partial_shape(lhs_place, ov_lhs.get_output_partial_shape(0)) + im.set_partial_shape(rhs_place, ov_rhs.get_output_partial_shape(0)) + om = fe.convert(im) + pt_out = model(pt_lhs, pt_rhs) + if isinstance(pt_out, (float, int, bool)): + pt_out_type = type(pt_out).__name__ + pt_out_shape = [] + else: + pt_out_type = pt_out.dtype + pt_out_shape = pt_out.size() + assert pt_to_ov_type_map.get(str(pt_out_type)) == om.get_output_element_type(0) + assert PartialShape(pt_out_shape) == om.get_output_partial_shape(0) From 6d80606b6f23bf177b48f83748fb9672ba47269a Mon Sep 17 00:00:00 2001 From: Mateusz Date: Thu, 15 Feb 2024 11:50:23 +0100 Subject: [PATCH 03/25] Add bool as PyScalar --- src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index cae1db1531c61a..15b48c1c579b91 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ 
b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -185,7 +185,7 @@ def _get_known_type_for_value(self, pt_type): if pt_type is None: return OVAny(OVType.dynamic) # TODO: Don't use str, use native types - if str(pt_type) in ["int", "float"]: + if str(pt_type) in ["int", "float", "bool"]: return OVAny(DecoderType.PyScalar(OVAny(pt_to_ov_type_map[str(pt_type)]))) elif str(pt_type) in pt_to_ov_type_map: return OVAny(pt_to_ov_type_map[str(pt_type)]) From 4028e05e328d0d50c23aaaa256ae22cf96c145c2 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Thu, 15 Feb 2024 12:17:19 +0100 Subject: [PATCH 04/25] Add PyScalar check for distance PT FE op --- src/frontends/pytorch/src/op/distance.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/distance.cpp b/src/frontends/pytorch/src/op/distance.cpp index bc330d07d52a04..6b9405980ed914 100644 --- a/src/frontends/pytorch/src/op/distance.cpp +++ b/src/frontends/pytorch/src/op/distance.cpp @@ -33,7 +33,7 @@ Output pairwise_distance(const NodeContext& context, auto p_plus_eps = context.mark_node(std::make_shared(p, eps)); auto inv_p = context.mark_node(std::make_shared(one, p_plus_eps)); auto minus_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); - align_eltwise_input_types(context, x, y); + align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); auto x_y_diff = context.mark_node(std::make_shared(x, y)); auto x_y_diff_in_p_power = context.mark_node(std::make_shared(x_y_diff, p)); auto summation = context.mark_node(std::make_shared(x_y_diff_in_p_power, minus_one, keepdim)); From 3ca5fcca87b0aa2c34600d3fb26c595378cc8f2a Mon Sep 17 00:00:00 2001 From: Mateusz Date: Tue, 20 Feb 2024 13:36:17 +0100 Subject: [PATCH 05/25] FIx ConvertPromoteTypes for torch with dynamic rank --- src/core/src/op/convert_promote_types.cpp | 49 +++++++++++-------- .../tests/type_prop/convert_promote_types.cpp | 10 +++- 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/src/core/src/op/convert_promote_types.cpp b/src/core/src/op/convert_promote_types.cpp index 35b24d435477ba..69157e669a5846 100644 --- a/src/core/src/op/convert_promote_types.cpp +++ b/src/core/src/op/convert_promote_types.cpp @@ -91,8 +91,6 @@ element::Type evaluate_common_type(const v14::ConvertPromoteTypes* op) { const auto is_input_0_real = input_0_type.is_real(); const auto is_input_1_real = input_1_type.is_real(); - const size_t input_0_bitwidth = input_0_type.bitwidth(); - const size_t input_1_bitwidth = input_1_type.bitwidth(); if (is_input_0_real != is_input_1_real) { // Floating and integer mixed, align to floating @@ -109,27 +107,36 @@ element::Type evaluate_common_type(const v14::ConvertPromoteTypes* op) { } else if (is_input_0_real == is_input_1_real) { // Type formats are the same (both are either floating or integer). - const auto& input_0_pshape = op->get_input_partial_shape(0); - const auto& input_1_pshape = op->get_input_partial_shape(1); - const auto is_input_0_scalar = input_0_pshape.is_static() && is_scalar(input_0_pshape); - const auto is_input_1_scalar = input_1_pshape.is_static() && is_scalar(input_1_pshape); + if (pytorch_scalar_promotion) { + const auto& input_0_rank = op->get_input_partial_shape(0).rank(); + const auto& input_1_rank = op->get_input_partial_shape(1).rank(); + if (input_0_rank.is_dynamic() || input_1_rank.is_dynamic()) { + // For pytorch mode, return element::dynamic if ranks affecting output type are dynamic. 
+ return element::dynamic; + } + const auto is_input_0_scalar = input_0_rank.get_length() == 0; + const auto is_input_1_scalar = input_1_rank.get_length() == 0; + if (is_input_0_scalar != is_input_1_scalar) { + // For pytorch mode, when number formats are same, promote to type of non-scalar input. + const auto target = is_input_0_scalar ? input_1_type : input_0_type; + if (!promote_unsafe) { + // For safe mode, check whether target type has bitwidth able to hold data from scalar type. + const auto scalar = is_input_0_scalar ? input_0_type : input_1_type; + const auto is_pytorch_promote_safe = + ((target.is_signed() == scalar.is_signed() && target.bitwidth() >= scalar.bitwidth()) || + (target.is_signed() && !scalar.is_signed() && target.bitwidth() * 2 >= scalar.bitwidth())); + NODE_VALIDATION_CHECK(op, + is_pytorch_promote_safe, + "Scalar input cannot be PyTorch-like promoted using safe promotion rules."); + } + return target; + } + } const auto is_input_0_signed = input_0_type.is_signed(); const auto is_input_1_signed = input_1_type.is_signed(); - if (pytorch_scalar_promotion && (is_input_0_scalar != is_input_1_scalar)) { - // For pytorch mode, when number formats are same, promote to type of non-scalar input. - const auto target = is_input_0_scalar ? input_1_type : input_0_type; - if (!promote_unsafe) { - // For safe mode, check whether target type has bitwidth able to hold data from scalar type. - const auto scalar = is_input_0_scalar ? input_0_type : input_1_type; - const auto is_pytorch_promote_safe = - ((target.is_signed() == scalar.is_signed() && target.bitwidth() >= scalar.bitwidth()) || - (target.is_signed() && !scalar.is_signed() && target.bitwidth() * 2 >= scalar.bitwidth())); - NODE_VALIDATION_CHECK(op, - is_pytorch_promote_safe, - "Scalar input cannot be PyTorch-like promoted using safe promotion rules."); - } - return target; - } else if ((is_input_0_signed != is_input_1_signed)) { + const size_t input_0_bitwidth = input_0_type.bitwidth(); + const size_t input_1_bitwidth = input_1_type.bitwidth(); + if ((is_input_0_signed != is_input_1_signed)) { // Signed and unsigned integers are mixed, convert to signed integer with bitwidth able to hold all unsigned // data. Exception for u64 + integer - either convert to type from `u64_promotion_target` or fail in safe // mode.
diff --git a/src/core/tests/type_prop/convert_promote_types.cpp b/src/core/tests/type_prop/convert_promote_types.cpp index f6d0d27d1c918a..f26961149c1de5 100644 --- a/src/core/tests/type_prop/convert_promote_types.cpp +++ b/src/core/tests/type_prop/convert_promote_types.cpp @@ -285,7 +285,15 @@ INSTANTIATE_TEST_SUITE_P(type_prop_pytorch_mode, ov::element::u8, true, true, - ov::element::u8, + ov::element::dynamic, + ov::element::f32}, + ConvertPromoteTypesTestParams{{}, + ov::element::i32, + ov::PartialShape().dynamic(), + ov::element::f16, + true, + true, + ov::element::f16, ov::element::f32}, ConvertPromoteTypesTestParams{{}, ov::element::f16, From 1443ccc2b1c4a7b05b5cfce7cb088ec0ab91f6f8 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Tue, 20 Feb 2024 13:37:10 +0100 Subject: [PATCH 06/25] Update torch type promotion --- src/frontends/pytorch/src/op/erfc.cpp | 4 ++-- src/frontends/pytorch/src/op/rsqrt.cpp | 2 +- src/frontends/pytorch/src/op_table.cpp | 16 ++++++++-------- src/frontends/pytorch/src/utils.cpp | 13 ++++++++----- src/frontends/pytorch/src/utils.hpp | 21 +++++++++++++++++++-- 5 files changed, 38 insertions(+), 18 deletions(-) diff --git a/src/frontends/pytorch/src/op/erfc.cpp b/src/frontends/pytorch/src/op/erfc.cpp index 8e049097102c64..34723a7f9b3bb7 100644 --- a/src/frontends/pytorch/src/op/erfc.cpp +++ b/src/frontends/pytorch/src/op/erfc.cpp @@ -26,7 +26,7 @@ OutputVector translate_erfc(const NodeContext& context) { auto ones = context.mark_node(make_shared(element::f32, Shape{}, 1.0f))->output(0); // align data types of input 'x' and ones - align_eltwise_input_types(context, x, ones); + align_eltwise_input_types(context, x, ones, false, true); // apply Erf to the input tensor 'x' auto y = context.mark_node(make_shared(x)); @@ -42,4 +42,4 @@ OutputVector translate_erfc(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/rsqrt.cpp b/src/frontends/pytorch/src/op/rsqrt.cpp index 4a7d4b3ca6630e..217c495366a785 100644 --- a/src/frontends/pytorch/src/op/rsqrt.cpp +++ b/src/frontends/pytorch/src/op/rsqrt.cpp @@ -21,7 +21,7 @@ OutputVector translate_rsqrt(const NodeContext& context) { auto data = context.get_input(0); auto one_const = context.mark_node(v0::Constant::create(element::f32, Shape({}), {1})); Output fake_const_for_type = context.mark_node(v0::Constant::create(element::f32, Shape({}), {.5})); - align_eltwise_input_types(context, data, fake_const_for_type); + align_eltwise_input_types(context, data, fake_const_for_type, false, true); auto one_const_casted = context.mark_node(std::make_shared(one_const, data)); auto sqrt_data = context.mark_node(std::make_shared(data)); return {context.mark_node(std::make_shared(one_const_casted, sqrt_data))}; diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index f44ad64b2bb317..d8fe508d645c9a 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -353,8 +353,8 @@ const std::map get_supported_ops_ts() { // aten::chunk - Supported in limited set of patterns {"aten::clamp", op::translate_clamp}, {"aten::clamp_", op::inplace_op}, - {"aten::clamp_max", op::translate_1to1_match_2_inputs_align_types}, - {"aten::clamp_min", op::translate_1to1_match_2_inputs_align_types}, + {"aten::clamp_max", op::translate_1to1_match_2_inputs_align_to_lhs}, + {"aten::clamp_min", op::translate_1to1_match_2_inputs_align_to_lhs}, 
{"aten::clip", op::translate_clamp}, {"aten::clip_", op::inplace_op}, {"aten::clone", op::skip_node}, // ignore clone operators that are inserted by PyTorch autograd @@ -390,7 +390,7 @@ const std::map get_supported_ops_ts() { {"aten::embedding_bag", op::translate_embedding_bag}, {"aten::empty", op::translate_empty}, {"aten::empty_like", op::translate_empty_like}, - {"aten::eq", op::translate_1to1_match_2_inputs_align_types}, + {"aten::eq", op::translate_1to1_match_2_inputs_align_to_lhs}, {"aten::erf", op::translate_erf}, {"aten::erf_", op::inplace_op}, {"aten::erfc", op::translate_erfc}, @@ -420,13 +420,13 @@ const std::map get_supported_ops_ts() { {"aten::full_like", op::translate_full_like}, {"aten::gather", op::translate_gather}, {"aten::gcd", op::translate_gcd}, - {"aten::ge", op::translate_1to1_match_2_inputs_align_types}, + {"aten::ge", op::translate_1to1_match_2_inputs_align_to_lhs}, {"aten::gelu", op::translate_gelu}, {"aten::glu", op::translate_glu}, {"aten::grid_sampler", op::translate_grid_sampler}, {"aten::group_norm", op::translate_group_norm}, {"aten::gru", op::translate_gru}, - {"aten::gt", op::translate_1to1_match_2_inputs_align_types}, + {"aten::gt", op::translate_1to1_match_2_inputs_align_to_lhs}, {"aten::hardsigmoid", op::quantizable_op>}, {"aten::hardswish", op::quantizable_op>}, {"aten::hardswish_", op::quantizable_op>>}, @@ -446,7 +446,7 @@ const std::map get_supported_ops_ts() { {"aten::is_nonzero", op::translate_is_nonzero}, {"aten::item", op::translate_1to1_match_1_inputs}, {"aten::layer_norm", op::translate_layer_norm}, - {"aten::le", op::translate_1to1_match_2_inputs_align_types}, + {"aten::le", op::translate_1to1_match_2_inputs_align_to_lhs}, {"aten::leaky_relu", op::translate_1to1_match_2_inputs}, {"aten::leaky_relu_", op::inplace_op>}, {"aten::len", op::translate_len}, @@ -475,7 +475,7 @@ const std::map get_supported_ops_ts() { {"aten::log10", op::translate_log10}, {"aten::log10_", op::inplace_op}, {"aten::lstm", op::translate_lstm}, - {"aten::lt", op::translate_1to1_match_2_inputs_align_types}, + {"aten::lt", op::translate_1to1_match_2_inputs_align_to_lhs}, {"aten::masked_fill", op::translate_masked_fill}, {"aten::masked_fill_", op::inplace_op}, {"aten::masked_scatter", op::translate_masked_scatter}, @@ -500,7 +500,7 @@ const std::map get_supported_ops_ts() { {"aten::multiply_", op::inplace_translate_1to1_match_2_inputs_align_types}, {"aten::multinomial", op::translate_multinomial}, {"aten::narrow", op::translate_narrow}, - {"aten::ne", op::translate_1to1_match_2_inputs_align_types}, + {"aten::ne", op::translate_1to1_match_2_inputs_align_to_lhs}, {"aten::neg", op::translate_neg}, {"aten::new_empty", op::translate_new_zeros}, {"aten::new_full", op::translate_new_full}, diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 98ebc2f3eafe71..9c12fe612dce2f 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -392,14 +392,17 @@ void align_eltwise_input_types(const NodeContext& context, Output& lhs, Output& rhs, const bool& is_lhs_python_scalar, - const bool& ir_rhs_python_scalar) { + const bool& is_rhs_python_scalar) { const auto& lhs_type = lhs.get_element_type(); const auto& rhs_type = rhs.get_element_type(); auto const_0 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); + auto const_1 = ov::op::v0::Constant::create(element::i32, Shape{}, {1}); auto out_type = context.get_output_type(0); - if (is_lhs_python_scalar && !ir_rhs_python_scalar) { + if (is_lhs_python_scalar && 
!is_rhs_python_scalar) { + lhs = context.mark_node(std::make_shared(lhs, const_1, false)); rhs = context.mark_node(std::make_shared(rhs, const_0)); - } else if (!is_lhs_python_scalar && ir_rhs_python_scalar) { + } else if (!is_lhs_python_scalar && is_rhs_python_scalar) { + rhs = context.mark_node(std::make_shared(rhs, const_1, false)); lhs = context.mark_node(std::make_shared(lhs, const_0)); } @@ -418,9 +421,9 @@ void align_eltwise_input_types(const NodeContext& context, } } - if (is_lhs_python_scalar && !ir_rhs_python_scalar) { + if (is_lhs_python_scalar && !is_rhs_python_scalar) { rhs = context.mark_node(std::make_shared(rhs, const_0)); - } else if (!is_lhs_python_scalar && ir_rhs_python_scalar) { + } else if (!is_lhs_python_scalar && is_rhs_python_scalar) { lhs = context.mark_node(std::make_shared(lhs, const_0)); } return; diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 12f50e39f17a99..24e24fb6cd0d13 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -139,7 +139,7 @@ OutputVector translate_1to1_match_1_inputs_with_fp32_type_alignment(const NodeCo auto x = context.get_input(0); // This const only needed for type alignment auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); - align_eltwise_input_types(context, x, dummy_const); + align_eltwise_input_types(context, x, dummy_const, false, true); return {context.mark_node(std::make_shared(x))}; } @@ -160,12 +160,29 @@ OutputVector translate_1to1_match_2_inputs_align_types(const NodeContext& contex auto rhs_type = context.get_input_type(1); // If type is string or None, we shouldn't align if (!lhs_type.is() && !rhs_type.is() && !lhs_type.is() && - !rhs_type.is()) + !rhs_type.is()) { + // align_eltwise_input_types(context, lhs, rhs, false, false); align_eltwise_input_types(context, lhs, rhs, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + } + OutputVector res = {context.mark_node(std::make_shared(lhs, rhs))}; + align_output_types(context, res); + return res; +} + +template +OutputVector translate_1to1_match_2_inputs_align_to_lhs(const NodeContext& context) { + num_inputs_check(context, 2, 2); + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(0) && !context.input_is_none(1), "Inputs should not be None."); + auto lhs = context.get_input(0); + auto rhs = context.get_input(1); + auto lhs_type = context.get_input_type(0); + auto rhs_type = context.get_input_type(1); + if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) + rhs = context.mark_node(std::make_shared(rhs, lhs)); OutputVector res = {context.mark_node(std::make_shared(lhs, rhs))}; align_output_types(context, res); return res; From 9d9260bc437c4f16a3db38309030f7bfd297a446 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Wed, 21 Feb 2024 17:10:38 +0100 Subject: [PATCH 07/25] Modify approach to handle tensor-scalar promotion --- src/frontends/pytorch/src/utils.cpp | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 9c12fe612dce2f..ff561906fae9ca 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -398,19 +398,24 @@ void align_eltwise_input_types(const NodeContext& context, auto const_0 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); auto const_1 = ov::op::v0::Constant::create(element::i32, Shape{}, {1}); auto 
out_type = context.get_output_type(0); + ov::Output tmp_lhs = lhs; + ov::Output tmp_rhs = rhs; if (is_lhs_python_scalar && !is_rhs_python_scalar) { - lhs = context.mark_node(std::make_shared(lhs, const_1, false)); - rhs = context.mark_node(std::make_shared(rhs, const_0)); + tmp_lhs = context.mark_node(std::make_shared(lhs, const_1, false)); + tmp_rhs = context.mark_node(std::make_shared(rhs, const_0)); } else if (!is_lhs_python_scalar && is_rhs_python_scalar) { - rhs = context.mark_node(std::make_shared(rhs, const_1, false)); - lhs = context.mark_node(std::make_shared(lhs, const_0)); + tmp_rhs = context.mark_node(std::make_shared(rhs, const_1, false)); + tmp_lhs = context.mark_node(std::make_shared(lhs, const_0)); } - auto at = context.mark_node(std::make_shared(lhs, rhs, true, true, element::f32)); + auto at = context.mark_node( + std::make_shared(tmp_lhs, tmp_rhs, true, true, element::f32)); auto dst_type = at->get_output_element_type(0); if (dst_type.is_dynamic()) { - lhs = at->output(0); - rhs = at->output(1); + // Add ConvertLike on original node to not remove changes to shape done to differentiate between tensors and + // scalars. + lhs = context.mark_node(std::make_shared(lhs, at->output(0))); + rhs = context.mark_node(std::make_shared(rhs, at->output(1))); } else { // Cast to destination type if (dst_type != lhs_type) { @@ -420,12 +425,6 @@ void align_eltwise_input_types(const NodeContext& context, rhs = context.mark_node(std::make_shared(rhs, dst_type)); } } - - if (is_lhs_python_scalar && !is_rhs_python_scalar) { - rhs = context.mark_node(std::make_shared(rhs, const_0)); - } else if (!is_lhs_python_scalar && is_rhs_python_scalar) { - lhs = context.mark_node(std::make_shared(lhs, const_0)); - } return; } From 4e6edbee993bbb415b43b42052de154783db12d8 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Fri, 23 Feb 2024 13:11:42 +0100 Subject: [PATCH 08/25] Revert int change to i64 --- src/bindings/python/src/openvino/frontend/pytorch/utils.py | 2 +- tests/layer_tests/py_frontend_tests/test_torch_frontend.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/utils.py b/src/bindings/python/src/openvino/frontend/pytorch/utils.py index 33f6e81683f40c..157592f3aabee0 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/utils.py @@ -123,7 +123,7 @@ def graph_has_ops(graph, op_types:list) -> bool: pt_to_ov_type_map = { "float": OVType.f32, - "int": OVType.i64, + "int": OVType.i32, "bool": OVType.boolean, "torch.bfloat16": OVType.bf16, "torch.float16": OVType.f16, diff --git a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py index f6254b26ae02e3..29200c86c265a5 100644 --- a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py +++ b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py @@ -444,7 +444,11 @@ def forward(self, x: float, y: torch.Tensor): else: pt_out_type = pt_out.dtype pt_out_shape = pt_out.size() - assert pt_to_ov_type_map.get(str(pt_out_type)) == om.get_output_element_type(0) + pt_out_type = pt_to_ov_type_map.get(str(pt_out_type)) + ov_out_type = om.get_output_element_type(0) + if pt_out_type == Type.i64 and ov_out_type == Type.i32 and "int" in [l_t, r_t]: + pytest.xfail("Pytorch int-like scalar in OV is converted to i32 instead of i64, mismatch is expected.") + assert pt_out_type == ov_out_type assert PartialShape(pt_out_shape) == 
om.get_output_partial_shape(0) From cc26dd684279b016ea10c7d74d6fbe6c780eefec Mon Sep 17 00:00:00 2001 From: Mateusz Date: Fri, 23 Feb 2024 18:26:15 +0100 Subject: [PATCH 09/25] Fix simplified_type_interpret for PyScalar --- src/frontends/pytorch/src/utils.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index ff561906fae9ca..1330b84ec2c5bc 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -379,6 +379,11 @@ Any simplified_type_interpret(Any type) { if (tensor.element_type.is()) { return tensor.element_type; } + } else if (type.is()) { + auto scalar = type.as(); + if (scalar.element_type.is()) { + return scalar.element_type; + } } return type; From b536b7466804f1a718ffb4c0d44228173eca2137 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Mon, 26 Feb 2024 09:33:09 +0100 Subject: [PATCH 10/25] Fix issue in device tests --- src/frontends/pytorch/src/utils.hpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 24e24fb6cd0d13..16cf8311a0d9da 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -181,8 +181,11 @@ OutputVector translate_1to1_match_2_inputs_align_to_lhs(const NodeContext& conte auto rhs = context.get_input(1); auto lhs_type = context.get_input_type(0); auto rhs_type = context.get_input_type(1); - if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) - rhs = context.mark_node(std::make_shared(rhs, lhs)); + if (!lhs_type.is() && !rhs_type.is() && !lhs_type.is() && + !rhs_type.is()) { + if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) + rhs = context.mark_node(std::make_shared(rhs, lhs)); + } OutputVector res = {context.mark_node(std::make_shared(lhs, rhs))}; align_output_types(context, res); return res; From 83fb5d0a2b57b2736efd6e3f6ab116eedd9ed0d3 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Mon, 26 Feb 2024 13:49:35 +0100 Subject: [PATCH 11/25] Enable add bool test --- src/frontends/pytorch/src/op/add.cpp | 20 +++++++++---------- .../py_frontend_tests/test_torch_frontend.py | 2 -- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/src/frontends/pytorch/src/op/add.cpp b/src/frontends/pytorch/src/op/add.cpp index 2d097518ed4a41..214c8da05f4d1a 100644 --- a/src/frontends/pytorch/src/op/add.cpp +++ b/src/frontends/pytorch/src/op/add.cpp @@ -29,6 +29,16 @@ OutputVector translate_add_common(const NodeContext& context, bool inplace) { // Case when two lists gets concatenated PYTORCH_OP_CONVERSION_CHECK(false, "aten::add is used for concatenation of lists, not possible to convert"); } + if (inplace) { + if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) + rhs = context.mark_node(std::make_shared(rhs, lhs)); + } else { + align_eltwise_input_types(context, + lhs, + rhs, + is_python_scalar_input(context, 0), + is_python_scalar_input(context, 1)); + } auto left_is_bool = lhs.get_element_type() == ov::element::boolean || (dtype0.is() && dtype0.as() == element::boolean); @@ -44,16 +54,6 @@ OutputVector translate_add_common(const NodeContext& context, bool inplace) { return {logical_or}; } - if (inplace) { - if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) - rhs = context.mark_node(std::make_shared(rhs, lhs)); - } else { - align_eltwise_input_types(context, - 
lhs, - rhs, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); - } Output alpha; if (!context.input_is_none(2)) { alpha = context.get_input(2); diff --git a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py index a8de840297d84a..9e119ef5df1641 100644 --- a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py +++ b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py @@ -465,8 +465,6 @@ def test_shared_consts_reused(): @pytest.mark.parametrize("l_scalar", [True, False]) @pytest.mark.parametrize("r_scalar", [True, False]) def test_pytorch_types_promotion(l_type, r_type, l_scalar, r_scalar): - if l_type == r_type == torch.bool: - pytest.skip("Add does not support both inputs being bool.") from openvino.frontend.pytorch.ts_decoder import (TorchScriptPythonDecoder, pt_to_ov_type_map) From dcbbf8ead9fee47bfb2b788a03555ea54aa7827a Mon Sep 17 00:00:00 2001 From: Mateusz Date: Mon, 26 Feb 2024 17:22:35 +0100 Subject: [PATCH 12/25] Add helpers for type promotion --- src/frontends/pytorch/src/utils.cpp | 24 ++++++++++++++++++++++++ src/frontends/pytorch/src/utils.hpp | 20 +++++++++++--------- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 1330b84ec2c5bc..8d85118dd2809e 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -445,6 +445,30 @@ void align_output_types(const NodeContext& context, OutputVector& outputs) { } } +Output get_input_with_floating_type(const NodeContext& context, size_t idx) { + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(idx), "Input should not be None."); + auto x = context.get_input(0); + // This const only needed for type alignment + auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); + align_eltwise_input_types(context, x, dummy_const, false, true); + return x; +} + +std::tuple, Output> get_inputs_with_promoted_types(const NodeContext& context, + size_t lhs_idx, + size_t rhs_idx) { + FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(lhs_idx) && !context.input_is_none(rhs_idx), + "Input should not be None."); + auto lhs = context.get_input(lhs_idx); + auto rhs = context.get_input(rhs_idx); + align_eltwise_input_types(context, + lhs, + rhs, + is_python_scalar_input(context, lhs_idx), + is_python_scalar_input(context, rhs_idx)); + return std::make_tuple(lhs, rhs); +} + std::deque> get_list_as_outputs(const Output& start) { std::deque> res; auto current_output = start; diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 16cf8311a0d9da..3c2d8da2f03829 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -103,6 +103,12 @@ void copy_runtime_info_and_name(const std::shared_ptr& from, ov::NodeVector to, const ov::NodeVector& additional_rt_info_src = {}); +Output get_input_with_floating_type(const NodeContext& context, size_t idx); + +std::tuple, Output> get_inputs_with_promoted_types(const NodeContext& context, + size_t lhs_idx, + size_t rhs_idx); + // helper ops Output masked_fill(ov::pass::NodeRegistry& rg, const Output& data, @@ -136,10 +142,7 @@ OutputVector translate_1to1_match_1_inputs(const NodeContext& context) { template OutputVector translate_1to1_match_1_inputs_with_fp32_type_alignment(const NodeContext& context) { FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(0), "Input should 
not be None."); - auto x = context.get_input(0); - // This const only needed for type alignment - auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); - align_eltwise_input_types(context, x, dummy_const, false, true); + auto x = get_input_with_floating_type(context, 0); return {context.mark_node(std::make_shared(x))}; } @@ -161,7 +164,6 @@ OutputVector translate_1to1_match_2_inputs_align_types(const NodeContext& contex // If type is string or None, we shouldn't align if (!lhs_type.is() && !rhs_type.is() && !lhs_type.is() && !rhs_type.is()) { - // align_eltwise_input_types(context, lhs, rhs, false, false); align_eltwise_input_types(context, lhs, rhs, @@ -181,10 +183,10 @@ OutputVector translate_1to1_match_2_inputs_align_to_lhs(const NodeContext& conte auto rhs = context.get_input(1); auto lhs_type = context.get_input_type(0); auto rhs_type = context.get_input_type(1); - if (!lhs_type.is() && !rhs_type.is() && !lhs_type.is() && - !rhs_type.is()) { - if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) - rhs = context.mark_node(std::make_shared(rhs, lhs)); + if ((!lhs_type.is() && !rhs_type.is() && !lhs_type.is() && + !rhs_type.is()) && + (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type())) { + rhs = context.mark_node(std::make_shared(rhs, lhs)); } OutputVector res = {context.mark_node(std::make_shared(lhs, rhs))}; align_output_types(context, res); From 61ae5f2be1943c5f54ccfd9034a9a1c9b1849627 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Mon, 26 Feb 2024 17:24:57 +0100 Subject: [PATCH 13/25] Use helpers in existing conversions --- src/frontends/pytorch/src/op/add.cpp | 12 ++++---- src/frontends/pytorch/src/op/avg_poolnd.cpp | 2 +- src/frontends/pytorch/src/op/bitwise.cpp | 18 ++++++------ src/frontends/pytorch/src/op/cross.cpp | 20 ++++--------- src/frontends/pytorch/src/op/floor_divide.cpp | 6 ++-- src/frontends/pytorch/src/op/fmod.cpp | 6 ++-- src/frontends/pytorch/src/op/gcd.cpp | 6 ++-- src/frontends/pytorch/src/op/min_max.cpp | 28 +++++++------------ src/frontends/pytorch/src/op/outer.cpp | 10 ++----- src/frontends/pytorch/src/op/pow.cpp | 6 +--- src/frontends/pytorch/src/op/remainder.cpp | 6 ++-- src/frontends/pytorch/src/op/rsqrt.cpp | 4 +-- src/frontends/pytorch/src/op/rsub.cpp | 15 ++-------- src/frontends/pytorch/src/op/sub.cpp | 18 ++++++------ src/frontends/pytorch/src/op/where.cpp | 6 ++-- 15 files changed, 62 insertions(+), 101 deletions(-) diff --git a/src/frontends/pytorch/src/op/add.cpp b/src/frontends/pytorch/src/op/add.cpp index 214c8da05f4d1a..f0a13a2cb2f3e5 100644 --- a/src/frontends/pytorch/src/op/add.cpp +++ b/src/frontends/pytorch/src/op/add.cpp @@ -20,8 +20,8 @@ using namespace ov::op; OutputVector translate_add_common(const NodeContext& context, bool inplace) { num_inputs_check(context, 2, 3); - auto lhs = context.get_input(0); - auto rhs = context.get_input(1); + Output lhs; + Output rhs; auto dtype0 = context.get_input_type(0); auto dtype1 = context.get_input_type(1); if (dtype0.is() && dtype1.is()) { @@ -30,14 +30,12 @@ OutputVector translate_add_common(const NodeContext& context, bool inplace) { PYTORCH_OP_CONVERSION_CHECK(false, "aten::add is used for concatenation of lists, not possible to convert"); } if (inplace) { + lhs = context.get_input(0); + rhs = context.get_input(1); if (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type()) rhs = context.mark_node(std::make_shared(rhs, lhs)); } else { 
- align_eltwise_input_types(context, - lhs, - rhs, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); + std::tie(lhs, rhs) = get_inputs_with_promoted_types(context, 0, 1); } auto left_is_bool = lhs.get_element_type() == ov::element::boolean || diff --git a/src/frontends/pytorch/src/op/avg_poolnd.cpp b/src/frontends/pytorch/src/op/avg_poolnd.cpp index 4a90db23a67c1e..95b1f3971c5089 100644 --- a/src/frontends/pytorch/src/op/avg_poolnd.cpp +++ b/src/frontends/pytorch/src/op/avg_poolnd.cpp @@ -74,4 +74,4 @@ OutputVector translate_avg_poolnd(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/bitwise.cpp b/src/frontends/pytorch/src/op/bitwise.cpp index 89d7f4c3de94f6..549181f5689b26 100644 --- a/src/frontends/pytorch/src/op/bitwise.cpp +++ b/src/frontends/pytorch/src/op/bitwise.cpp @@ -26,9 +26,9 @@ OutputVector translate_bitwise_not(const NodeContext& context) { OutputVector translate_bitwise_and(const NodeContext& context) { num_inputs_check(context, 2, 3); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); auto and_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, and_x); @@ -38,9 +38,9 @@ OutputVector translate_bitwise_and(const NodeContext& context) { OutputVector translate_bitwise_or(const NodeContext& context) { num_inputs_check(context, 2, 3); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); auto or_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, or_x); @@ -50,9 +50,9 @@ OutputVector translate_bitwise_or(const NodeContext& context) { OutputVector translate_bitwise_xor(const NodeContext& context) { num_inputs_check(context, 2, 3); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); auto xor_x = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, xor_x); diff --git a/src/frontends/pytorch/src/op/cross.cpp b/src/frontends/pytorch/src/op/cross.cpp index 5a33badcc905fa..8219147ae737ca 100644 --- a/src/frontends/pytorch/src/op/cross.cpp +++ b/src/frontends/pytorch/src/op/cross.cpp @@ -35,13 +35,9 @@ OutputVector translate_linalg_cross(const NodeContext& context) { // aten::linalg_cross(Tensor self, Tensor other, int? dim=-1) -> Tensor // aten::linalg_cross.out(Tensor self, Tensor other, int? dim=-1, *, Tensor(a!) out) -> Tensor(a!) 
num_inputs_check(context, 3, 4); - auto self = context.get_input(0); - auto other = context.get_input(1); - align_eltwise_input_types(context, - self, - other, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); + Output self; + Output other; + std::tie(self, other) = get_inputs_with_promoted_types(context, 0, 1); auto const_minus_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); Output dim; if (context.input_is_none(2)) { @@ -63,13 +59,9 @@ OutputVector translate_cross(const NodeContext& context) { // aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor // aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) num_inputs_check(context, 3, 4); - auto self = context.get_input(0); - auto other = context.get_input(1); - align_eltwise_input_types(context, - self, - other, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); + Output self; + Output other; + std::tie(self, other) = get_inputs_with_promoted_types(context, 0, 1); Output dim; if (context.input_is_none(2)) { // If dim is not given, it defaults to the first dimension found with the size 3 diff --git a/src/frontends/pytorch/src/op/floor_divide.cpp b/src/frontends/pytorch/src/op/floor_divide.cpp index 7864e72fbfcb7a..5bd7534591e8b1 100644 --- a/src/frontends/pytorch/src/op/floor_divide.cpp +++ b/src/frontends/pytorch/src/op/floor_divide.cpp @@ -17,9 +17,9 @@ using namespace ov::op; OutputVector translate_floor_divide(const NodeContext& context) { num_inputs_check(context, 2, 2); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); auto div = context.mark_node(std::make_shared(x, y, true)); return {context.mark_node(std::make_shared(div))}; }; diff --git a/src/frontends/pytorch/src/op/fmod.cpp b/src/frontends/pytorch/src/op/fmod.cpp index f60d2cd64b6f1b..a99755b0358ca5 100644 --- a/src/frontends/pytorch/src/op/fmod.cpp +++ b/src/frontends/pytorch/src/op/fmod.cpp @@ -14,9 +14,9 @@ namespace op { OutputVector translate_fmod(const NodeContext& context) { // aten::fmod with schema aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor num_inputs_check(context, 2, 3); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); auto res = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { diff --git a/src/frontends/pytorch/src/op/gcd.cpp b/src/frontends/pytorch/src/op/gcd.cpp index 34b43b87512e11..6566476022a429 100644 --- a/src/frontends/pytorch/src/op/gcd.cpp +++ b/src/frontends/pytorch/src/op/gcd.cpp @@ -23,9 +23,9 @@ using namespace ov::op; OutputVector translate_gcd(const NodeContext& context) { num_inputs_check(context, 2, 2); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); auto zero_i32 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); auto trip_count = std::make_shared(element::i32, Shape{}, 1000); diff --git 
a/src/frontends/pytorch/src/op/min_max.cpp b/src/frontends/pytorch/src/op/min_max.cpp index 71fb33c497172a..06041bcf79f38f 100644 --- a/src/frontends/pytorch/src/op/min_max.cpp +++ b/src/frontends/pytorch/src/op/min_max.cpp @@ -33,12 +33,8 @@ OutputVector translate_max(const NodeContext& context) { } // torch.max(input, other) if (context.input_is_none(2)) { - auto y = context.get_input(1); - align_eltwise_input_types(context, - x, - y, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); return {context.mark_node(std::make_shared(x, y))}; } // torch.max(input, dim, keepdim), returns values and indices @@ -95,12 +91,8 @@ OutputVector translate_min(const NodeContext& context) { } // torch.min(input, other) if (context.input_is_none(2)) { - auto y = context.get_input(1); - align_eltwise_input_types(context, - x, - y, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); return {context.mark_node(std::make_shared(x, y))}; } // torch.min(input, dim, keepdim), returns values and indices @@ -124,9 +116,9 @@ OutputVector translate_maximum(const NodeContext& context) { // aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) num_inputs_check(context, 2, 3); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); auto res = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, res); @@ -140,9 +132,9 @@ OutputVector translate_minimum(const NodeContext& context) { // aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) num_inputs_check(context, 2, 3); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); auto res = context.mark_node(std::make_shared(x, y)); if (!context.input_is_none(2)) { context.mutate_input(2, res); diff --git a/src/frontends/pytorch/src/op/outer.cpp b/src/frontends/pytorch/src/op/outer.cpp index 2f9db3e9f02cc0..873c4b6f5c11df 100644 --- a/src/frontends/pytorch/src/op/outer.cpp +++ b/src/frontends/pytorch/src/op/outer.cpp @@ -20,13 +20,9 @@ OutputVector translate_outer(const NodeContext& context) { // aten::outer(Tensor self, Tensor vec2) -> Tensor // aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) 
num_inputs_check(context, 2, 3); - auto vec1 = context.get_input(0); - auto vec2 = context.get_input(1); - align_eltwise_input_types(context, - vec1, - vec2, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); + Output vec1; + Output vec2; + std::tie(vec1, vec2) = get_inputs_with_promoted_types(context, 0, 1); auto const_zero = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); auto const_minus_one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); vec1 = context.mark_node(std::make_shared(vec1, const_minus_one)); diff --git a/src/frontends/pytorch/src/op/pow.cpp b/src/frontends/pytorch/src/op/pow.cpp index 4b9335489b5345..fbdf1839969f14 100644 --- a/src/frontends/pytorch/src/op/pow.cpp +++ b/src/frontends/pytorch/src/op/pow.cpp @@ -19,11 +19,7 @@ OutputVector translate_pow(const NodeContext& context) { if (inplace) { rhs = std::make_shared(rhs, lhs); } else { - align_eltwise_input_types(context, - lhs, - rhs, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); + std::tie(lhs, rhs) = get_inputs_with_promoted_types(context, 0, 1); } auto res = context.mark_node(std::make_shared(lhs, rhs)); if (inplace) { diff --git a/src/frontends/pytorch/src/op/remainder.cpp b/src/frontends/pytorch/src/op/remainder.cpp index 7c3fc389df24e0..735a9f46343694 100644 --- a/src/frontends/pytorch/src/op/remainder.cpp +++ b/src/frontends/pytorch/src/op/remainder.cpp @@ -15,9 +15,9 @@ using namespace ov::op; OutputVector translate_remainder(const NodeContext& context) { num_inputs_check(context, 2, 2); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); return {context.mark_node(std::make_shared(x, y))}; }; diff --git a/src/frontends/pytorch/src/op/rsqrt.cpp b/src/frontends/pytorch/src/op/rsqrt.cpp index 217c495366a785..724cca1c3452e9 100644 --- a/src/frontends/pytorch/src/op/rsqrt.cpp +++ b/src/frontends/pytorch/src/op/rsqrt.cpp @@ -18,10 +18,8 @@ using namespace ov::op; OutputVector translate_rsqrt(const NodeContext& context) { num_inputs_check(context, 1, 1); - auto data = context.get_input(0); + auto data = get_input_with_floating_type(context, 0); auto one_const = context.mark_node(v0::Constant::create(element::f32, Shape({}), {1})); - Output fake_const_for_type = context.mark_node(v0::Constant::create(element::f32, Shape({}), {.5})); - align_eltwise_input_types(context, data, fake_const_for_type, false, true); auto one_const_casted = context.mark_node(std::make_shared(one_const, data)); auto sqrt_data = context.mark_node(std::make_shared(data)); return {context.mark_node(std::make_shared(one_const_casted, sqrt_data))}; diff --git a/src/frontends/pytorch/src/op/rsub.cpp b/src/frontends/pytorch/src/op/rsub.cpp index cad75f7740e740..759799ff22d85a 100644 --- a/src/frontends/pytorch/src/op/rsub.cpp +++ b/src/frontends/pytorch/src/op/rsub.cpp @@ -17,25 +17,16 @@ using namespace ov::op; OutputVector translate_rsub(const NodeContext& context) { num_inputs_check(context, 2, 3); - auto self = context.get_input(0); - auto other = context.get_input(1); + Output self; + Output other; + std::tie(self, other) = get_inputs_with_promoted_types(context, 0, 1); if (!context.input_is_none(2)) { auto alpha = context.get_input(2); - align_eltwise_input_types(context, - self, - other, - is_python_scalar_input(context, 0), - 
is_python_scalar_input(context, 1)); // reverse aten::sub other - self * alpha auto alpha_casted = context.mark_node(std::make_shared(alpha, self)); auto alpha_mul = context.mark_node(std::make_shared(self, alpha_casted)); return {context.mark_node(std::make_shared(other, alpha_mul))}; } - align_eltwise_input_types(context, - self, - other, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); return {context.mark_node(std::make_shared(other, self))}; }; diff --git a/src/frontends/pytorch/src/op/sub.cpp b/src/frontends/pytorch/src/op/sub.cpp index e7d048a0055da8..d43fdf7e40612c 100644 --- a/src/frontends/pytorch/src/op/sub.cpp +++ b/src/frontends/pytorch/src/op/sub.cpp @@ -17,17 +17,15 @@ using namespace ov::op; OutputVector translate_sub_common(const NodeContext& context, bool inplace) { num_inputs_check(context, 2, 3); - auto x = context.get_input(0); - auto y = context.get_input(1); + Output x; + Output y; if (inplace) { + x = context.get_input(0); + y = context.get_input(1); if (x.get_element_type().is_dynamic() || x.get_element_type() != y.get_element_type()) y = context.mark_node(std::make_shared(y, x)); } else { - align_eltwise_input_types(context, - x, - y, - is_python_scalar_input(context, 0), - is_python_scalar_input(context, 1)); + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); } // default alpha is 1 so no need to multiply if alpha is not provided if (!context.input_is_none(2)) { @@ -51,9 +49,9 @@ OutputVector translate_sub_(const NodeContext& context) { OutputVector translate_sub_fx(const NodeContext& context) { num_inputs_check(context, 2, 2); - auto x = context.get_input(0); - auto y = context.get_input(1); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 0), is_python_scalar_input(context, 1)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 0, 1); // default alpha is 1 so no need to multiply if alpha is not provided if (context.has_attribute("alpha")) { auto alpha = context.get_attribute>("alpha"); diff --git a/src/frontends/pytorch/src/op/where.cpp b/src/frontends/pytorch/src/op/where.cpp index db51f04d0e4edb..db81d26463d131 100644 --- a/src/frontends/pytorch/src/op/where.cpp +++ b/src/frontends/pytorch/src/op/where.cpp @@ -19,9 +19,9 @@ OutputVector translate_where(const NodeContext& context) { auto cond = context.get_input(0); PYTORCH_OP_CONVERSION_CHECK(!context.input_is_none(1), "aten::where(cond) unsupported"); auto bool_cond = context.mark_node(std::make_shared(cond, element::boolean)); - auto x = context.get_input(1); - auto y = context.get_input(2); - align_eltwise_input_types(context, x, y, is_python_scalar_input(context, 1), is_python_scalar_input(context, 2)); + Output x; + Output y; + std::tie(x, y) = get_inputs_with_promoted_types(context, 1, 2); return {context.mark_node(std::make_shared(bool_cond, x, y))}; }; From ee3af7f0753d03920c7f60ef9795b38d136ac46c Mon Sep 17 00:00:00 2001 From: Mateusz Date: Tue, 27 Feb 2024 09:15:07 +0100 Subject: [PATCH 14/25] Fix compilation error --- src/frontends/pytorch/src/utils.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 8d85118dd2809e..43a7fce9212113 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -447,7 +447,7 @@ void align_output_types(const NodeContext& context, OutputVector& outputs) { Output get_input_with_floating_type(const NodeContext& context, size_t idx) { 
FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(idx), "Input should not be None."); - auto x = context.get_input(0); + auto x = context.get_input(static_cast(idx)); // This const only needed for type alignment auto dummy_const = context.mark_node(ov::op::v0::Constant::create(element::f32, Shape({}), {0.5}))->output(0); align_eltwise_input_types(context, x, dummy_const, false, true); @@ -459,8 +459,8 @@ std::tuple, Output> get_inputs_with_promoted_types(const Node size_t rhs_idx) { FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(lhs_idx) && !context.input_is_none(rhs_idx), "Input should not be None."); - auto lhs = context.get_input(lhs_idx); - auto rhs = context.get_input(rhs_idx); + auto lhs = context.get_input(static_cast(lhs_idx)); + auto rhs = context.get_input(static_cast(rhs_idx)); align_eltwise_input_types(context, lhs, rhs, From 49658998e3c1cec51ebff0b9aca5ffd3f2cc0ba3 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Tue, 5 Mar 2024 12:43:46 +0100 Subject: [PATCH 15/25] Add missing to floating conversions --- src/frontends/pytorch/src/op/log.cpp | 4 ++-- src/frontends/pytorch/src/op/reciprocal.cpp | 6 +++--- src/frontends/pytorch/src/op/rsqrt.cpp | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/frontends/pytorch/src/op/log.cpp b/src/frontends/pytorch/src/op/log.cpp index c4fc38a71c677b..8f43cfce1126f9 100644 --- a/src/frontends/pytorch/src/op/log.cpp +++ b/src/frontends/pytorch/src/op/log.cpp @@ -85,9 +85,9 @@ OutputVector translate_logsumexp(const NodeContext& context) { OutputVector translate_log1p(const NodeContext& context) { // torch.log1p returns a tensor with the natural logarithm of the elements of input + 1. num_inputs_check(context, 1, 2); - auto x = context.get_input(0); + auto x = get_input_with_floating_type(context, 0); auto one = context.mark_node(v0::Constant::create(element::f32, Shape{}, {1}))->output(0); - align_eltwise_input_types(context, x, one); + one = context.mark_node(std::make_shared(one, x)); auto x_plus_one = context.mark_node(std::make_shared(x, one)); auto log = context.mark_node(std::make_shared(x_plus_one)); return {log}; diff --git a/src/frontends/pytorch/src/op/reciprocal.cpp b/src/frontends/pytorch/src/op/reciprocal.cpp index 04697ea7e7e925..577380e33cfcc3 100644 --- a/src/frontends/pytorch/src/op/reciprocal.cpp +++ b/src/frontends/pytorch/src/op/reciprocal.cpp @@ -17,9 +17,9 @@ using namespace ov::op; OutputVector translate_reciprocal(const NodeContext& context) { num_inputs_check(context, 1, 2); - auto x = context.get_input(0); + auto x = get_input_with_floating_type(context, 0); auto const_neg_1 = context.mark_node(v0::Constant::create(element::f32, Shape{}, {-1}))->output(0); - align_eltwise_input_types(context, x, const_neg_1, true); + const_neg_1 = context.mark_node(std::make_shared(const_neg_1, x)); auto power = context.mark_node(std::make_shared(x, const_neg_1)); return {context.mark_node(power)}; }; @@ -27,4 +27,4 @@ OutputVector translate_reciprocal(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/frontends/pytorch/src/op/rsqrt.cpp b/src/frontends/pytorch/src/op/rsqrt.cpp index 23e613760f3e75..7de735e8962055 100644 --- a/src/frontends/pytorch/src/op/rsqrt.cpp +++ b/src/frontends/pytorch/src/op/rsqrt.cpp @@ -18,7 +18,7 @@ using namespace ov::op; OutputVector translate_rsqrt(const NodeContext& context) { num_inputs_check(context, 1, 2); - auto data = context.get_input(0); + auto 
data = get_input_with_floating_type(context, 0); auto one_const = context.mark_node(v0::Constant::create(element::f32, Shape({}), {1})); auto one_const_casted = context.mark_node(std::make_shared(one_const, data)); auto sqrt_data = context.mark_node(std::make_shared(data)); From 61112e67123a891e096f086fc13d2f138fd321f9 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Tue, 5 Mar 2024 13:04:54 +0100 Subject: [PATCH 16/25] Fix imports --- .../py_frontend_tests/test_torch_frontend.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py index 9e119ef5df1641..fc60a4e7704c37 100644 --- a/tests/layer_tests/py_frontend_tests/test_torch_frontend.py +++ b/tests/layer_tests/py_frontend_tests/test_torch_frontend.py @@ -7,8 +7,8 @@ from openvino.frontend import FrontEndManager, ConversionExtension, NodeContext from openvino.runtime import PartialShape, Type import openvino.runtime.opset10 as ops +import pytest -from pathlib import Path import glob import itertools import math @@ -16,14 +16,6 @@ import re from pathlib import Path -import numpy as np -import openvino.runtime.opset10 as ops -import pytest -import torch -from openvino.frontend import (ConversionExtension, FrontEndManager, - NodeContext, OpExtension) -from openvino.runtime import PartialShape, Type - class aten_relu(torch.nn.Module): def forward(self, x): From c8bd8f323e38bebba985f0df4d66a75d2fcff294 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Tue, 5 Mar 2024 14:22:20 +0100 Subject: [PATCH 17/25] Improve erfc --- src/frontends/pytorch/src/op/erfc.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/frontends/pytorch/src/op/erfc.cpp b/src/frontends/pytorch/src/op/erfc.cpp index 34723a7f9b3bb7..e010d8119e7ce9 100644 --- a/src/frontends/pytorch/src/op/erfc.cpp +++ b/src/frontends/pytorch/src/op/erfc.cpp @@ -20,13 +20,12 @@ OutputVector translate_erfc(const NodeContext& context) { // aten::erf(Tensor self) -> Tensor // aten::erf.out(Tensor self, Tensor(!a) out) -> Tensor(!a) num_inputs_check(context, 1, 2); - auto x = context.get_input(0); + auto x = get_input_with_floating_type(context, 0); // create 'ones' to use to calculate complementary of Erf output auto ones = context.mark_node(make_shared(element::f32, Shape{}, 1.0f))->output(0); - // align data types of input 'x' and ones - align_eltwise_input_types(context, x, ones, false, true); + ones = context.mark_node(std::make_shared(ones, x)); // apply Erf to the input tensor 'x' auto y = context.mark_node(make_shared(x)); From 571d9cb7361adfa7b338c16a3671f9d6c9393e37 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Tue, 5 Mar 2024 16:12:39 +0100 Subject: [PATCH 18/25] Remove unused variable --- src/frontends/pytorch/src/utils.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 43a7fce9212113..5eb21ef5c1386f 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -402,7 +402,6 @@ void align_eltwise_input_types(const NodeContext& context, const auto& rhs_type = rhs.get_element_type(); auto const_0 = ov::op::v0::Constant::create(element::i32, Shape{}, {0}); auto const_1 = ov::op::v0::Constant::create(element::i32, Shape{}, {1}); - auto out_type = context.get_output_type(0); ov::Output tmp_lhs = lhs; ov::Output tmp_rhs = rhs; if (is_lhs_python_scalar && !is_rhs_python_scalar) { From e40b7b5429d9bc1729b79b8664486c828ea1a464 
Mon Sep 17 00:00:00 2001 From: Mateusz Date: Fri, 8 Mar 2024 15:15:00 +0000 Subject: [PATCH 19/25] Remove newline --- src/frontends/pytorch/src/op/avg_poolnd.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frontends/pytorch/src/op/avg_poolnd.cpp b/src/frontends/pytorch/src/op/avg_poolnd.cpp index 8dfb8255b46ffc..57389b7b42e872 100644 --- a/src/frontends/pytorch/src/op/avg_poolnd.cpp +++ b/src/frontends/pytorch/src/op/avg_poolnd.cpp @@ -74,4 +74,4 @@ OutputVector translate_avg_poolnd(const NodeContext& context) { } // namespace op } // namespace pytorch } // namespace frontend -} // namespace ov +} // namespace ov \ No newline at end of file From a6f492834b1b39bcd0ddb5615bb6a117f746111f Mon Sep 17 00:00:00 2001 From: Mateusz Date: Fri, 8 Mar 2024 15:15:32 +0000 Subject: [PATCH 20/25] Disable transform --- src/frontends/pytorch/src/frontend.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp index 65f35e06da10a7..b0099dc0d90dc4 100644 --- a/src/frontends/pytorch/src/frontend.cpp +++ b/src/frontends/pytorch/src/frontend.cpp @@ -218,8 +218,6 @@ void FrontEnd::normalize(const std::shared_ptr& model) const { manager.register_pass(); manager.register_pass(); manager.register_pass(); - // Second pass of ConvertConvertPromoteTypes after all converting transformations - manager.register_pass(); manager.register_pass(true); manager.run_passes(model); From 5345d1325aff11c6e9b13a672b29b6083fac7375 Mon Sep 17 00:00:00 2001 From: Mateusz Date: Fri, 8 Mar 2024 15:15:44 +0000 Subject: [PATCH 21/25] Remove align to lhs --- src/frontends/pytorch/src/op_table.cpp | 16 ++++++++-------- src/frontends/pytorch/src/utils.hpp | 18 ------------------ 2 files changed, 8 insertions(+), 26 deletions(-) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 3e4f0634e6f0ce..d846badb5ca629 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -364,8 +364,8 @@ const std::map get_supported_ops_ts() { // aten::chunk - Supported in limited set of patterns {"aten::clamp", op::translate_clamp}, {"aten::clamp_", op::inplace_op}, - {"aten::clamp_max", op::translate_1to1_match_2_inputs_align_to_lhs}, - {"aten::clamp_min", op::translate_1to1_match_2_inputs_align_to_lhs}, + {"aten::clamp_max", op::translate_1to1_match_2_inputs_align_types}, + {"aten::clamp_min", op::translate_1to1_match_2_inputs_align_types}, {"aten::clip", op::translate_clamp}, {"aten::clip_", op::inplace_op}, {"aten::clone", op::skip_node}, // ignore clone operators that are inserted by PyTorch autograd @@ -401,7 +401,7 @@ const std::map get_supported_ops_ts() { {"aten::embedding_bag", op::translate_embedding_bag}, {"aten::empty", op::translate_empty}, {"aten::empty_like", op::translate_empty_like}, - {"aten::eq", op::translate_1to1_match_2_inputs_align_to_lhs}, + {"aten::eq", op::translate_1to1_match_2_inputs_align_types}, {"aten::erf", op::translate_erf}, {"aten::erf_", op::inplace_op}, {"aten::erfc", op::translate_erfc}, @@ -433,13 +433,13 @@ const std::map get_supported_ops_ts() { {"aten::full_like", op::translate_full_like}, {"aten::gather", op::translate_gather}, {"aten::gcd", op::translate_gcd}, - {"aten::ge", op::translate_1to1_match_2_inputs_align_to_lhs}, + {"aten::ge", op::translate_1to1_match_2_inputs_align_types}, {"aten::gelu", op::translate_gelu}, {"aten::glu", op::translate_glu}, {"aten::grid_sampler", op::translate_grid_sampler}, {"aten::group_norm", 
op::translate_group_norm}, {"aten::gru", op::translate_gru}, - {"aten::gt", op::translate_1to1_match_2_inputs_align_to_lhs}, + {"aten::gt", op::translate_1to1_match_2_inputs_align_types}, {"aten::hardsigmoid", op::quantizable_op>}, {"aten::hardsigmoid_", op::quantizable_op>>}, @@ -462,7 +462,7 @@ const std::map get_supported_ops_ts() { {"aten::is_nonzero", op::translate_is_nonzero}, {"aten::item", op::translate_1to1_match_1_inputs}, {"aten::layer_norm", op::translate_layer_norm}, - {"aten::le", op::translate_1to1_match_2_inputs_align_to_lhs}, + {"aten::le", op::translate_1to1_match_2_inputs_align_types}, {"aten::leaky_relu", op::translate_1to1_match_2_inputs}, {"aten::leaky_relu_", op::inplace_op>}, {"aten::len", op::translate_len}, @@ -492,7 +492,7 @@ const std::map get_supported_ops_ts() { {"aten::log10", op::optional_out}, {"aten::log10_", op::inplace_op}, {"aten::lstm", op::translate_lstm}, - {"aten::lt", op::translate_1to1_match_2_inputs_align_to_lhs}, + {"aten::lt", op::translate_1to1_match_2_inputs_align_types}, {"aten::masked_fill", op::translate_masked_fill}, {"aten::masked_fill_", op::inplace_op}, {"aten::masked_scatter", op::translate_masked_scatter}, @@ -517,7 +517,7 @@ const std::map get_supported_ops_ts() { {"aten::multiply_", op::translate_mul_}, {"aten::multinomial", op::translate_multinomial}, {"aten::narrow", op::translate_narrow}, - {"aten::ne", op::translate_1to1_match_2_inputs_align_to_lhs}, + {"aten::ne", op::translate_1to1_match_2_inputs_align_types}, {"aten::neg", op::translate_neg}, {"aten::new_empty", op::translate_new_zeros}, {"aten::new_full", op::translate_new_full}, diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 010db6862b813a..3f612e36458ee2 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -186,24 +186,6 @@ OutputVector translate_1to1_match_2_inputs_align_types(const NodeContext& contex return res; } -template -OutputVector translate_1to1_match_2_inputs_align_to_lhs(const NodeContext& context) { - num_inputs_check(context, 2, 2); - FRONT_END_OP_CONVERSION_CHECK(!context.input_is_none(0) && !context.input_is_none(1), "Inputs should not be None."); - auto lhs = context.get_input(0); - auto rhs = context.get_input(1); - auto lhs_type = context.get_input_type(0); - auto rhs_type = context.get_input_type(1); - if ((!lhs_type.is() && !rhs_type.is() && !lhs_type.is() && - !rhs_type.is()) && - (lhs.get_element_type().is_dynamic() || lhs.get_element_type() != rhs.get_element_type())) { - rhs = context.mark_node(std::make_shared(rhs, lhs)); - } - OutputVector res = {context.mark_node(std::make_shared(lhs, rhs))}; - align_output_types(context, res); - return res; -} - template OutputVector inplace_translate_1to1_match_2_inputs_align_types(const NodeContext& context) { num_inputs_check(context, 2, 2); From 971ff78b96ad06b3995b9d40fdcf7242683c941e Mon Sep 17 00:00:00 2001 From: Mateusz Mikolajczyk Date: Tue, 12 Mar 2024 11:29:01 +0100 Subject: [PATCH 22/25] Apply suggestions from code review Co-authored-by: Pawel Raasz --- src/core/src/op/convert_promote_types.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/core/src/op/convert_promote_types.cpp b/src/core/src/op/convert_promote_types.cpp index 69157e669a5846..e38f54de845a02 100644 --- a/src/core/src/op/convert_promote_types.cpp +++ b/src/core/src/op/convert_promote_types.cpp @@ -118,10 +118,10 @@ element::Type evaluate_common_type(const v14::ConvertPromoteTypes* op) { const auto is_input_1_scalar = 
     if (is_input_0_scalar != is_input_1_scalar) {
         // For pytorch mode, when number formats are same, promote to type of non-scalar input.
-        const auto target = is_input_0_scalar ? input_1_type : input_0_type;
+        const auto& target = is_input_0_scalar ? input_1_type : input_0_type;
         if (!promote_unsafe) {
             // For safe mode, check wether target type has bitwidth able to hold data from scalar type.
-            const auto scalar = is_input_0_scalar ? input_0_type : input_1_type;
+            const auto& scalar = is_input_0_scalar ? input_0_type : input_1_type;
             const auto is_pytorch_promote_safe =
                 ((target.is_signed() == scalar.is_signed() && target.bitwidth() >= scalar.bitwidth()) ||
                  (target.is_signed() && !scalar.is_signed() && target.bitwidth() * 2 >= scalar.bitwidth()));
@@ -134,8 +134,8 @@ element::Type evaluate_common_type(const v14::ConvertPromoteTypes* op) {
     }
     const auto is_input_0_signed = input_0_type.is_signed();
     const auto is_input_1_signed = input_1_type.is_signed();
-    const size_t input_0_bitwidth = input_0_type.bitwidth();
-    const size_t input_1_bitwidth = input_1_type.bitwidth();
+    const auto input_0_bitwidth = input_0_type.bitwidth();
+    const auto input_1_bitwidth = input_1_type.bitwidth();
     if ((is_input_0_signed != is_input_1_signed)) {
         // Signed and unsigned integers are mixed, convert to signed integer with bitwidth able to hold all unsigned
         // data. Exception for u64 + integer - either convert to type from `u64_promotion_target` or fail in safe

From 264e7837d0cac9c254b93cfe70b28dff6021bc46 Mon Sep 17 00:00:00 2001
From: Mateusz
Date: Thu, 14 Mar 2024 14:41:06 +0100
Subject: [PATCH 23/25] Fix issue with dynamic types in Mish-4 op

---
 src/core/src/op/mish.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/core/src/op/mish.cpp b/src/core/src/op/mish.cpp
index 98d349af04ddd5..601c2cf3541edc 100644
--- a/src/core/src/op/mish.cpp
+++ b/src/core/src/op/mish.cpp
@@ -36,7 +36,7 @@ void Mish::validate_and_infer_types() {
     const auto& data_batch_et = get_input_element_type(0);

     NODE_VALIDATION_CHECK(this,
-                          data_batch_et.is_real(),
+                          data_batch_et.is_real() || data_batch_et.is_dynamic(),
                           "Element must be of floating point type, Got: ",
                           data_batch_et);

From 26f554b497521babed4fa7e7b6c13e870fc3fbe6 Mon Sep 17 00:00:00 2001
From: Mateusz
Date: Thu, 14 Mar 2024 16:26:35 +0100
Subject: [PATCH 24/25] Improve handling dynamic ranks for mixed Tensor+Scalar cases

---
 src/frontends/pytorch/src/utils.cpp | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp
index 3de4a102ff92e3..b5bdbdf97f13ab 100644
--- a/src/frontends/pytorch/src/utils.cpp
+++ b/src/frontends/pytorch/src/utils.cpp
@@ -401,16 +401,19 @@ void align_eltwise_input_types(const NodeContext& context,
                                const bool& is_rhs_python_scalar) {
     const auto& lhs_type = lhs.get_element_type();
     const auto& rhs_type = rhs.get_element_type();
-    auto const_0 = ov::op::v0::Constant::create(element::i32, Shape{}, {0});
-    auto const_1 = ov::op::v0::Constant::create(element::i32, Shape{}, {1});
+    auto const_0 = ov::op::v0::Constant::create(element::i32, Shape{}, {1});
+    auto const_1 = ov::op::v0::Constant::create(element::i32, Shape{1}, {1});
+    // Create temporary copy of lhs and rhs for ConvertPromoteTypes to not modify original nodes.
     ov::Output tmp_lhs = lhs;
     ov::Output tmp_rhs = rhs;
+    // Python scalar has lower priority than any tensor with any dimension.
+    // If only one input is PyScalar, replace it with const to mitigate issues with dynamic type caused by dynamic shape.
     if (is_lhs_python_scalar && !is_rhs_python_scalar) {
-        tmp_lhs = context.mark_node(std::make_shared(lhs, const_1, false));
-        tmp_rhs = context.mark_node(std::make_shared(rhs, const_0));
+        tmp_lhs = context.mark_node(std::make_shared(const_0, lhs));
+        tmp_rhs = context.mark_node(std::make_shared(const_1, rhs));
     } else if (!is_lhs_python_scalar && is_rhs_python_scalar) {
+        tmp_lhs = context.mark_node(std::make_shared(const_1, lhs));
+        tmp_rhs = context.mark_node(std::make_shared(const_0, rhs));
     }
     auto at = context.mark_node(

From 728127fe2aecab10707b0c57429d91616024ff60 Mon Sep 17 00:00:00 2001
From: Mateusz
Date: Thu, 14 Mar 2024 16:42:03 +0100
Subject: [PATCH 25/25] Fix code style

---
 src/frontends/pytorch/src/utils.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp
index b5bdbdf97f13ab..f5a0416bb43383 100644
--- a/src/frontends/pytorch/src/utils.cpp
+++ b/src/frontends/pytorch/src/utils.cpp
@@ -407,7 +407,8 @@ void align_eltwise_input_types(const NodeContext& context,
     ov::Output tmp_lhs = lhs;
     ov::Output tmp_rhs = rhs;
     // Python scalar has lower priority than any tensor with any dimension.
-    // If only one input is PyScalar, replace it with const to mitigate issues with dynamic type caused by dynamic shape.
+    // If only one input is PyScalar, replace it with const to mitigate issues with dynamic type caused by dynamic
+    // shape.
     if (is_lhs_python_scalar && !is_rhs_python_scalar) {
         tmp_lhs = context.mark_node(std::make_shared(const_0, lhs));
         tmp_rhs = context.mark_node(std::make_shared(const_1, rhs));
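
The rule that patches 22-25 implement is easier to read outside the operator: a Python scalar never changes the element type of the tensor it is combined with, and in safe mode the tensor's type must also be able to hold the scalar's data. The snippet below is a standalone sketch of that decision, not part of the patch series; TypeInfo, pytorch_promote_is_safe and promote_scalar_with_tensor are illustrative names, and a plain struct stands in for the real ov::element::Type API.

// Illustrative only: restates the is_pytorch_promote_safe expression and the
// "scalar has lower priority" rule from the diffs above, without OpenVINO types.
#include <cstddef>
#include <iostream>
#include <stdexcept>

struct TypeInfo {
    const char* name;
    bool is_signed;
    std::size_t bitwidth;
};

// Safe-mode check: the non-scalar (target) type must be able to hold data coming
// from the scalar type.
bool pytorch_promote_is_safe(const TypeInfo& target, const TypeInfo& scalar) {
    return (target.is_signed == scalar.is_signed && target.bitwidth >= scalar.bitwidth) ||
           (target.is_signed && !scalar.is_signed && target.bitwidth * 2 >= scalar.bitwidth);
}

// When exactly one input is a rank-0 scalar and both inputs share the same number
// format, the result keeps the tensor's type; safe mode may reject the combination.
TypeInfo promote_scalar_with_tensor(const TypeInfo& tensor, const TypeInfo& scalar, bool promote_unsafe) {
    if (!promote_unsafe && !pytorch_promote_is_safe(tensor, scalar))
        throw std::runtime_error("scalar data does not fit into the tensor type in safe mode");
    return tensor;
}

int main() {
    const TypeInfo i32{"i32", true, 32};
    const TypeInfo u8{"u8", false, 8};

    // i32 tensor + u8 Python scalar: i32 wins and can represent every u8 value.
    std::cout << promote_scalar_with_tensor(i32, u8, false).name << '\n';  // i32

    // u8 tensor + i32 Python scalar: u8 still wins, but only in unsafe mode,
    // because u8 cannot represent negative i32 data.
    std::cout << promote_scalar_with_tensor(u8, i32, true).name << '\n';   // u8
    return 0;
}

In the frontend itself the same outcome is reached without touching the real inputs: as the utils.cpp hunks above show, a Python-scalar input is stood in for by a small constant (const_0 or const_1), so ConvertPromoteTypes sees the intended ranks even when the actual tensor shapes are dynamic.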