From 2205f48c71ede01fdc59a909929c9dcf23065e6c Mon Sep 17 00:00:00 2001 From: Evgenya Nugmanova Date: Fri, 15 Sep 2023 12:27:12 +0400 Subject: [PATCH] Squeeze w/o axes [1, -1] -> dyn_rank (#19593) Removes PDPD logic from core code, keeps PDPD specifics in the translator --- .../include/squeeze_shape_inference.hpp | 32 +++++++++---------- src/core/tests/type_prop/squeeze.cpp | 8 ++++- src/frontends/paddle/src/op/slice_ops.hpp | 22 ++++++------- 3 files changed, 32 insertions(+), 30 deletions(-) diff --git a/src/core/shape_inference/include/squeeze_shape_inference.hpp b/src/core/shape_inference/include/squeeze_shape_inference.hpp index 774542b3edb477..d6c167975ac7d3 100644 --- a/src/core/shape_inference/include/squeeze_shape_inference.hpp +++ b/src/core/shape_inference/include/squeeze_shape_inference.hpp @@ -56,8 +56,7 @@ std::vector shape_infer(const Squeeze* op, OPENVINO_SUPPRESS_DEPRECATED_END unique_axes.reset(new std::set(axes->cbegin(), axes->cend())); } else if (arg_rank.get_length() > 0 && shape_size(axes_shape.to_shape()) == 1) { - // The `axes` input must be a Parameter with single element to ensure uniqueness of axes - // only rank is deduced + // The `axes` input is a single element tensor which is unique by definition, deducing output rank NODE_VALIDATION_CHECK(op, std::any_of(arg_shape.cbegin(), arg_shape.cend(), @@ -80,14 +79,21 @@ std::vector shape_infer(const Squeeze* op, if (arg_rank.is_static() && (unique_axes != nullptr)) { output_shape.resize(0); if (unique_axes->empty()) { - // According to specification, if only first input provided` or axes are empty - // remove all dimensions equal to 1. - std::copy_if(arg_shape.cbegin(), - arg_shape.cend(), - std::back_inserter(output_shape), - [](const DimType& dim) { - return !dim.compatible(1); - }); + // if only first input provided or axes are empty remove all dimensions equal to 1.
+ if (std::any_of(arg_shape.cbegin(), arg_shape.cend(), [](const DimType& d) { + return d.is_dynamic() && d.compatible(1); + })) { + // we are unsure if dynamic dimensions would be equal to 1 or not, so we set dynamic output rank + output_shape = PartialShape::dynamic(); + return output_shapes; + } else { + std::copy_if(arg_shape.cbegin(), + arg_shape.cend(), + std::back_inserter(output_shape), + [](const DimType& dim) { + return !dim.compatible(1); + }); + } } else { int64_t idx = 0; auto rm_axis_iter = unique_axes->cbegin(); @@ -111,12 +117,6 @@ std::vector shape_infer(const Squeeze* op, std::back_inserter(output_shape), not_squeezable_at_axis); } - // When arg shape has got static rank but shape is dynamic and output shape dimensions is empty (scalar) - // make dynamic output except the case when arg_shape is 1-D shape with 0 or 1 element then should be scalar. - if (arg_shape.is_dynamic() && (output_shape.size() == 0) && - !(arg_rank.get_length() == 1 && arg_shape[0].get_max_length() <= 1)) { - output_shape = PartialShape::dynamic(); - } } else { output_shape = PartialShape::dynamic(); } diff --git a/src/core/tests/type_prop/squeeze.cpp b/src/core/tests/type_prop/squeeze.cpp index 3047fb9acd03fa..7561a65c798d54 100644 --- a/src/core/tests/type_prop/squeeze.cpp +++ b/src/core/tests/type_prop/squeeze.cpp @@ -27,6 +27,12 @@ TEST(type_prop, squeeze_axes_invalid_value) { HasSubstr("provided axis value is invalid. 
Only axes of size 1 may be removed.")); } +TEST(type_prop, squeeze_single_input) { + auto param = make_shared(element::f32, PartialShape{1, -1, 3, 4}); + auto s = make_shared(param); + EXPECT_EQ(s->get_output_partial_shape(0), PartialShape::dynamic()); +} + TEST(type_prop, squeeze_axes_invalid_rank) { auto param = make_shared(element::f32, Shape{1, 2, 3, 4}); auto axes_node = make_shared(element::i32, Shape{2, 1}, vector{0, 2}); @@ -254,7 +260,7 @@ const auto empty_axes_test_values = std::vector{}, PartialShape{Dimension(2, 5), Dimension(3, 4), 6}), std::make_tuple(PartialShape::dynamic(6), std::vector{}, PartialShape::dynamic()), - std::make_tuple(PartialShape{Dimension(0, 1)}, std::vector{}, PartialShape{}), + std::make_tuple(PartialShape{Dimension(0, 1)}, std::vector{}, PartialShape::dynamic()), std::make_tuple(PartialShape{Dimension::dynamic(), 1, Dimension::dynamic()}, std::vector{}, PartialShape::dynamic()), diff --git a/src/frontends/paddle/src/op/slice_ops.hpp b/src/frontends/paddle/src/op/slice_ops.hpp index 37026111d15c91..dc2a609ba18513 100644 --- a/src/frontends/paddle/src/op/slice_ops.hpp +++ b/src/frontends/paddle/src/op/slice_ops.hpp @@ -91,26 +91,22 @@ NamedOutputs slice_op(const NodeContext& node, const bool& stride_input) { const auto decrease_axis = node.get_attribute>("decrease_axis"); if (decrease_axis.size() > 0) { - // according to paddle slice_op, when all axes are decreased, output shape is [1], instead of scalar. - // Ref: paddle/fluid/operators/slice_op.h PartialShape input_shape = data.get_partial_shape(); PADDLE_OP_CHECK(node, input_shape.rank().is_static(), "input rank of slice must be static when decrease_axis is set."); - + if (input_shape.size() == decrease_axis.size()) { + // according to paddle slice_op, when all axes are decreased, output shape is [1], instead of scalar.
+ // Ref: paddle/fluid/operators/slice_op.h + auto decreased_node = std::make_shared( + stride_slice_node, + std::make_shared(element::i64, Shape{1}, 1), + false); + return node.default_single_output_mapping({decreased_node}, {"Out"}); + } const auto squeeze_index_node = default_opset::Constant::create(element::i32, {decrease_axis.size()}, decrease_axis); const auto decreased_node = std::make_shared(stride_slice_node, squeeze_index_node); - - const auto input_rank = input_shape.rank().get_length(); - if ((size_t)input_rank == decrease_axis.size()) { - auto restore_node = std::make_shared( - decreased_node, - std::make_shared(element::i64, Shape{1}, 1), - false); // restore to shape (1,) - return node.default_single_output_mapping({restore_node}, {"Out"}); - } - return node.default_single_output_mapping({decreased_node}, {"Out"}); }