Skip to content

Commit

Permalink
Squeeze w/o axes [1, -1] -> dyn_rank (openvinotoolkit#19593)
Browse files Browse the repository at this point in the history
Removes PDPD logic from core code, keeps PDPD specifics in the translator
  • Loading branch information
jane-intel authored Sep 15, 2023
1 parent d623483 commit 2205f48
Show file tree
Hide file tree
Showing 3 changed files with 32 additions and 30 deletions.
32 changes: 16 additions & 16 deletions src/core/shape_inference/include/squeeze_shape_inference.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,7 @@ std::vector<TRShape> shape_infer(const Squeeze* op,
OPENVINO_SUPPRESS_DEPRECATED_END
unique_axes.reset(new std::set<int64_t>(axes->cbegin(), axes->cend()));
} else if (arg_rank.get_length() > 0 && shape_size(axes_shape.to_shape()) == 1) {
// The `axes` input must be a Parameter with single element to ensure uniqueness of axes
// only rank is deduced
// The `axes` input is a single-element tensor, which is unique by definition; only the output rank is deduced.
NODE_VALIDATION_CHECK(op,
std::any_of(arg_shape.cbegin(),
arg_shape.cend(),
Expand All @@ -80,14 +79,21 @@ std::vector<TRShape> shape_infer(const Squeeze* op,
if (arg_rank.is_static() && (unique_axes != nullptr)) {
output_shape.resize(0);
if (unique_axes->empty()) {
// According to specification, if only first input provided` or axes are empty
// remove all dimensions equal to 1.
std::copy_if(arg_shape.cbegin(),
arg_shape.cend(),
std::back_inserter(output_shape),
[](const DimType& dim) {
return !dim.compatible(1);
});
// If only the first input is provided, or the axes are empty, remove all dimensions equal to 1.
if (std::any_of(arg_shape.cbegin(), arg_shape.cend(), [](const DimType& d) {
return d.is_dynamic() && d.compatible(1);
})) {
// We cannot tell whether the dynamic dimensions will be equal to 1, so the output rank is set to dynamic.
output_shape = PartialShape::dynamic();
return output_shapes;
} else {
std::copy_if(arg_shape.cbegin(),
arg_shape.cend(),
std::back_inserter(output_shape),
[](const DimType& dim) {
return !dim.compatible(1);
});
}
} else {
int64_t idx = 0;
auto rm_axis_iter = unique_axes->cbegin();
Expand All @@ -111,12 +117,6 @@ std::vector<TRShape> shape_infer(const Squeeze* op,
std::back_inserter(output_shape),
not_squeezable_at_axis);
}
// When arg shape has got static rank but shape is dynamic and output shape dimensions is empty (scalar)
// make dynamic output except the case when arg_shape is 1-D shape with 0 or 1 element then should be scalar.
if (arg_shape.is_dynamic() && (output_shape.size() == 0) &&
!(arg_rank.get_length() == 1 && arg_shape[0].get_max_length() <= 1)) {
output_shape = PartialShape::dynamic();
}
} else {
output_shape = PartialShape::dynamic();
}
Expand Down
8 changes: 7 additions & 1 deletion src/core/tests/type_prop/squeeze.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,12 @@ TEST(type_prop, squeeze_axes_invalid_value) {
HasSubstr("provided axis value is invalid. Only axes of size 1 may be removed."));
}

TEST(type_prop, squeeze_single_input) {
auto param = make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{1, -1, 3, 4});
auto s = make_shared<op::v0::Squeeze>(param);
EXPECT_EQ(s->get_output_partial_shape(0), PartialShape::dynamic());
}

TEST(type_prop, squeeze_axes_invalid_rank) {
auto param = make_shared<ov::op::v0::Parameter>(element::f32, Shape{1, 2, 3, 4});
auto axes_node = make_shared<ov::op::v0::Constant>(element::i32, Shape{2, 1}, vector<int32_t>{0, 2});
Expand Down Expand Up @@ -254,7 +260,7 @@ const auto empty_axes_test_values =
std::vector<int64_t>{},
PartialShape{Dimension(2, 5), Dimension(3, 4), 6}),
std::make_tuple(PartialShape::dynamic(6), std::vector<int64_t>{}, PartialShape::dynamic()),
std::make_tuple(PartialShape{Dimension(0, 1)}, std::vector<int64_t>{}, PartialShape{}),
std::make_tuple(PartialShape{Dimension(0, 1)}, std::vector<int64_t>{}, PartialShape::dynamic()),
std::make_tuple(PartialShape{Dimension::dynamic(), 1, Dimension::dynamic()},
std::vector<int64_t>{},
PartialShape::dynamic()),
Expand Down
22 changes: 9 additions & 13 deletions src/frontends/paddle/src/op/slice_ops.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,26 +91,22 @@ NamedOutputs slice_op(const NodeContext& node, const bool& stride_input) {
const auto decrease_axis = node.get_attribute<std::vector<int32_t>>("decrease_axis");

if (decrease_axis.size() > 0) {
// according to paddle slice_op, when all axes are decreased, output shape is [1], instead of scalar.
// Ref: paddle/fluid/operators/slice_op.h
PartialShape input_shape = data.get_partial_shape();
PADDLE_OP_CHECK(node,
input_shape.rank().is_static(),
"input rank of slice must be static when decrease_axis is set.");

if (input_shape.size() == decrease_axis.size()) {
// according to paddle slice_op, when all axes are decreased, output shape is [1], instead of scalar.
// Ref: paddle/fluid/operators/slice_op.h
auto decreased_node = std::make_shared<default_opset::Reshape>(
stride_slice_node,
std::make_shared<default_opset::Constant>(element::i64, Shape{1}, 1),
false);
return node.default_single_output_mapping({decreased_node}, {"Out"});
}
const auto squeeze_index_node =
default_opset::Constant::create(element::i32, {decrease_axis.size()}, decrease_axis);
const auto decreased_node = std::make_shared<default_opset::Squeeze>(stride_slice_node, squeeze_index_node);

const auto input_rank = input_shape.rank().get_length();
if ((size_t)input_rank == decrease_axis.size()) {
auto restore_node = std::make_shared<default_opset::Reshape>(
decreased_node,
std::make_shared<default_opset::Constant>(element::i64, Shape{1}, 1),
false); // restore to shape (1,)
return node.default_single_output_mapping({restore_node}, {"Out"});
}

return node.default_single_output_mapping({decreased_node}, {"Out"});
}

Expand Down

0 comments on commit 2205f48

Please sign in to comment.