Revise gelu (openvinotoolkit#5327)
* add type_prop and backend tests for gelu op

* add visitor test for gelu op

* add additional type_prop and backend tests and remove gelu from manifest

* resolve conflicts

* fix indentation and remove unnecessary includes

* remove gelu from manifests
bszmelcz authored May 14, 2021
1 parent 6e6e4b7 commit 314a1d7
Showing 6 changed files with 190 additions and 76 deletions.
1 change: 1 addition & 0 deletions ngraph/test/CMakeLists.txt
@@ -222,6 +222,7 @@ set(SRC
    visitors/op/extractimagepatches.cpp
    visitors/op/fake_quantize.cpp
    visitors/op/gather.cpp
+    visitors/op/gelu.cpp
    visitors/op/grn.cpp
    visitors/op/group_conv.cpp
    visitors/op/interpolate.cpp
134 changes: 65 additions & 69 deletions ngraph/test/backend/gelu.in.cpp
@@ -2,90 +2,86 @@
// SPDX-License-Identifier: Apache-2.0
//

-#include <algorithm>
-#include <cinttypes>
-#include <cmath>
-#include <cstdlib>
-#include <random>
-#include <string>
-#include "util/random.hpp"

// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif

#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on

#include "gtest/gtest.h"
#include "runtime/backend.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
+#include "util/engine/test_engines.hpp"
+#include "util/test_case.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

static string s_manifest = "${MANIFEST}";
+using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});

-NGRAPH_TEST(${BACKEND_NAME}, gelu_f32)
+NGRAPH_TEST(${BACKEND_NAME}, gelu_erf_mode_inference_f32_8D)
{
-    Shape shape{100000};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f = make_shared<Function>(make_shared<op::Gelu>(A), ParameterVector{A});
-
-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
-
-    test::Uniform<float> rng(-100.0f, 100.0f);
-    vector<vector<float>> args;
-    for (shared_ptr<op::Parameter> param : f->get_parameters())
-    {
-        auto name = param->get_name();
-        vector<float> tensor_val(shape_size(param->get_shape()));
-        rng.initialize(tensor_val);
-        args.push_back(tensor_val);
-    }
-
-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, args[0]);
-    auto result = backend->create_tensor(element::f32, shape);
-
-    std::transform(args[0].begin(), args[0].end(), args[0].begin(), [](float x) -> float {
-        return 0.5f * x * (1.0f + erf(x / sqrt(2.0f)));
-    });
-
-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close(args[0], read_vector<float>(result), .007f, .007f));
+    Shape in_shape{8};
+    element::Type et = element::f32;
+
+    auto param = make_shared<op::Parameter>(et, in_shape);
+    auto gelu = make_shared<op::v7::Gelu>(param);
+    auto f = make_shared<Function>(gelu, ParameterVector{param});
+
+    vector<float> in_vec{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0};
+    vector<float> out_vec{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507};
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(in_shape, in_vec);
+    test_case.add_expected_output<float>(in_shape, out_vec);
+    test_case.run_with_tolerance_as_fp(1e-4f);
}

-NGRAPH_TEST(${BACKEND_NAME}, gelu_f64)
+NGRAPH_TEST(${BACKEND_NAME}, gelu_tanh_mode_inference_f32_8D)
{
-    Shape shape{8};
-    auto A = make_shared<op::Parameter>(element::f64, shape);
-    auto f = make_shared<Function>(make_shared<op::Gelu>(A), ParameterVector{A});
+    Shape in_shape{8};
+    element::Type et = element::f32;
+
+    auto param = make_shared<op::Parameter>(et, in_shape);
+    auto gelu = make_shared<op::v7::Gelu>(param, op::GeluApproximationMode::TANH);
+    auto f = make_shared<Function>(gelu, ParameterVector{param});
+
+    vector<float> in_vec{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0};
+    vector<float> out_vec{-0.00012636185, -0.0040495098, -0.04550028, -0.15865529, 0.0, 0.8413447, 1.9544997, 2.9959507};
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input<float>(in_shape, in_vec);
+    test_case.add_expected_output<float>(in_shape, out_vec);
+    test_case.run_with_tolerance_as_fp(1e-3f);
+}

-    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+NGRAPH_TEST(${BACKEND_NAME}, gelu_erf_mode_inference_f32_3D)
+{
+    Shape in_shape{3};
+    element::Type et = element::f32;
+
+    auto param = make_shared<op::Parameter>(et, in_shape);
+    auto gelu = make_shared<op::v7::Gelu>(param);
+    auto f = make_shared<Function>(gelu, ParameterVector{param});
+
+    vector<float> in_vec{-0.5, 0.1, 0.4};
+    vector<float> out_vec{-0.15426877, 0.05398279, 0.2621686};
+
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input(in_shape, in_vec);
+    test_case.add_expected_output(in_shape, out_vec);
+    test_case.run_with_tolerance_as_fp(1e-4f);
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, gelu_tanh_mode_inference_f32_3D)
+{
+    Shape in_shape{3};
+    element::Type et = element::f32;

-    // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f64, shape);
-    vector<double> input{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0};
-    copy_data(a, input);
-    auto result = backend->create_tensor(element::f64, shape);
+    auto param = make_shared<op::Parameter>(et, in_shape);
+    auto gelu = make_shared<op::v7::Gelu>(param, op::GeluApproximationMode::TANH);
+    auto f = make_shared<Function>(gelu, ParameterVector{param});

-    std::transform(input.begin(), input.end(), input.begin(), [](double x) -> double {
-        return 0.5 * x * (1.0 + erf(x / sqrt(2.0)));
-    });
+    vector<float> in_vec{-0.5, 0.1, 0.4};
+    vector<float> out_vec{-0.15428599, 0.053982753, 0.262161165};

-    auto handle = backend->compile(f);
-    handle->call_with_validate({result}, {a});
-    EXPECT_TRUE(test::all_close_f(input, read_vector<double>(result)));
+    auto test_case = test::TestCase<TestEngine>(f);
+    test_case.add_input(in_shape, in_vec);
+    test_case.add_expected_output(in_shape, out_vec);
+    test_case.run_with_tolerance_as_fp(1e-4f);
}
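
For reference, the expected out_vec values in the tests above follow the two GELU definitions selectable on op::v7::Gelu: the exact ERF form (visible in the removed test code) and the tanh approximation. Below is a minimal standalone C++ sketch, not part of this commit, that reproduces both; the tanh constants (sqrt(2/pi) and 0.044715) are the standard Hendrycks-Gimpel ones. On these inputs the two forms differ by up to a few 1e-4, which is presumably why the tanh-mode 8D test runs with the looser 1e-3 tolerance against erf-derived expected values.

// Standalone reference sketch (not part of the commit): prints both GELU modes
// for the 8-element test input used above.
#include <cmath>
#include <cstdio>

int main()
{
    const float pi = 3.14159265f;
    const float in_vec[] = {-4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f};
    for (float x : in_vec)
    {
        // ERF mode (exact): GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
        const float erf_mode = 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
        // TANH mode: GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        const float tanh_mode = 0.5f * x *
            (1.0f + std::tanh(std::sqrt(2.0f / pi) * (x + 0.044715f * x * x * x)));
        std::printf("x = %4.1f  erf: %.8f  tanh: %.8f\n", x, erf_mode, tanh_mode);
    }
    return 0;
}
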
4 changes: 0 additions & 4 deletions ngraph/test/runtime/ie/unit_test.manifest
@@ -940,10 +940,6 @@ topk_max_sort_index
topk_min_sort_index
topk_1d_i32_max_all
topk_int64
-gelu_f32
-gelu_f64
-gelu_backprop_factor_f32
-gelu_backprop_factor_f64

backwards_abs
backwards_acos
3 changes: 0 additions & 3 deletions ngraph/test/runtime/interpreter/unit_test.manifest
@@ -55,9 +55,6 @@ INTERPRETER.min_to_scalar_int8
INTERPRETER.max_trivial_int8
INTERPRETER.max_to_scalar_int8
INTERPRETER.max_3d_to_scalar_double
-INTERPRETER.gelu_f64
-INTERPRETER.gelu_backprop_factor_f64
-INTERPRETER.backwards_gelu_f64
INTERPRETER.gather_4d_indices_axis_0_uint8
INTERPRETER.gather_axis_0_int8
INTERPRETER.gather_axis_0_int16
97 changes: 97 additions & 0 deletions ngraph/test/type_prop/gelu.cpp
@@ -0,0 +1,97 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

using namespace std;
using namespace ngraph;

TEST(type_prop, gelu_default_mode_inference_f32)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{1, 32, 32});
    auto gelu = make_shared<op::v7::Gelu>(param);

    ASSERT_EQ(gelu->get_element_type(), element::f32);
    ASSERT_EQ(gelu->get_shape(), (Shape{1, 32, 32}));
    ASSERT_EQ(gelu->get_approximation_mode(), op::GeluApproximationMode::ERF);
}

TEST(type_prop, gelu_default_mode_inference_f16)
{
    auto param = make_shared<op::Parameter>(element::f16, Shape{1, 32, 32});
    auto gelu = make_shared<op::v7::Gelu>(param);

    ASSERT_EQ(gelu->get_element_type(), element::f16);
    ASSERT_EQ(gelu->get_shape(), (Shape{1, 32, 32}));
    ASSERT_EQ(gelu->get_approximation_mode(), op::GeluApproximationMode::ERF);
}

TEST(type_prop, gelu_tanh_mode_inference_f32)
{
    auto param = make_shared<op::Parameter>(element::f32, Shape{1, 32, 32});
    auto gelu = make_shared<op::v7::Gelu>(param, op::GeluApproximationMode::TANH);

    ASSERT_EQ(gelu->get_element_type(), element::f32);
    ASSERT_EQ(gelu->get_shape(), (Shape{1, 32, 32}));
    ASSERT_EQ(gelu->get_approximation_mode(), op::GeluApproximationMode::TANH);
}

TEST(type_prop, gelu_tanh_mode_inference_f16)
{
    auto param = make_shared<op::Parameter>(element::f16, Shape{1, 32, 32});
    auto gelu = make_shared<op::v7::Gelu>(param, op::GeluApproximationMode::TANH);

    ASSERT_EQ(gelu->get_element_type(), element::f16);
    ASSERT_EQ(gelu->get_shape(), (Shape{1, 32, 32}));
    ASSERT_EQ(gelu->get_approximation_mode(), op::GeluApproximationMode::TANH);
}

TEST(type_prop, gelu_incompatible_input_type_boolean)
{
    auto param = make_shared<op::Parameter>(element::boolean, Shape{1, 32, 32});
    ASSERT_THROW(std::make_shared<op::v7::Gelu>(param), ngraph::NodeValidationFailure);
}

TEST(type_prop, gelu_incompatible_input_type_u16)
{
    auto param = make_shared<op::Parameter>(element::u16, Shape{1, 32, 32});
    ASSERT_THROW(std::make_shared<op::v7::Gelu>(param), ngraph::NodeValidationFailure);
}

TEST(type_prop, gelu_incompatible_input_type_i32)
{
    auto param = make_shared<op::Parameter>(element::i32, Shape{1, 32, 32});
    ASSERT_THROW(std::make_shared<op::v7::Gelu>(param), ngraph::NodeValidationFailure);
}

TEST(type_prop, gelu_incompatible_input_type_i16)
{
    auto param = make_shared<op::Parameter>(element::i16, Shape{1, 32, 32});
    ASSERT_THROW(std::make_shared<op::v7::Gelu>(param), ngraph::NodeValidationFailure);
}

TEST(type_prop, gelu_dynamic_rank_input_shape_2D)
{
    const PartialShape param_shape{Dimension::dynamic(), 10};
    const auto param = std::make_shared<op::Parameter>(element::f32, param_shape);
    const auto op = std::make_shared<op::v7::Gelu>(param);
    ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 10}));
}

TEST(type_prop, gelu_dynamic_rank_input_shape_3D)
{
    const PartialShape param_shape{100, Dimension::dynamic(), 58};
    const auto param = std::make_shared<op::Parameter>(element::f32, param_shape);
    const auto op = std::make_shared<op::v7::Gelu>(param);
    ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{100, Dimension(), 58}));
}

TEST(type_prop, gelu_dynamic_rank_input_shape_full)
{
    const auto param = std::make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    const auto op = std::make_shared<op::v7::Gelu>(param);
    ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
27 changes: 27 additions & 0 deletions ngraph/test/visitors/op/gelu.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "gtest/gtest.h"

#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset7.hpp"

#include "util/visitor.hpp"

using namespace std;
using namespace ngraph;
using ngraph::test::NodeBuilder;

TEST(attributes, gelu_op)
{
    NodeBuilder::get_ops().register_factory<opset7::Gelu>();
    const auto data_input = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
    const auto approximation_mode = op::GeluApproximationMode::ERF;
    const auto gelu = make_shared<opset7::Gelu>(data_input, approximation_mode);
    NodeBuilder builder(gelu);
    auto g_gelu = as_type_ptr<opset7::Gelu>(builder.create());

    EXPECT_EQ(g_gelu->get_approximation_mode(), gelu->get_approximation_mode());
}
