Skip to content

Commit

Permalink
Add global config for test infra (openvinotoolkit#17547)
Browse files Browse the repository at this point in the history
* [IE TESTS] Add Global test config for Subgraph base test

* Replace the use of a config option with function redefinition

* fix build

* remove extra changes for gna/template

* code style

* add nvidia to devices

* Fix debian

* remove nvidia
  • Loading branch information
iefode authored Jun 23, 2023
1 parent eb43f40 commit 31b07c4
Show file tree
Hide file tree
Showing 8 changed files with 79 additions and 41 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
//

#include "functional_test_utils/core_config.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
// Within the test scope we don't need any implicit bf16 optimisations, so let's run the network as is.
Expand All @@ -11,3 +12,17 @@ void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
configuration.insert({InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO});
}
}

namespace ov {
namespace test {

// CPU-specific global configuration hook for ov::test::SubgraphBaseTest.
// Within the test scope we don't need any implicit bf16 optimisations, so
// the network runs as-is: ENFORCE_BF16 is forced to NO on x86 unless the
// test already set it explicitly.
void core_configuration(ov::test::SubgraphBaseTest* test) {
#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64)
    // std::map::emplace is a no-op when the key is already present, so a
    // separate count() guard is unnecessary — an explicit setting wins.
    test->configuration.emplace(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16,
                                InferenceEngine::PluginConfigParams::NO);
#endif
}

}  // namespace test
}  // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <string>

#include "functional_test_utils/blob_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
const float MAX_VAL_2B_FEAT = 16384.0f;
Expand Down Expand Up @@ -57,3 +58,11 @@ void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
configuration[scaleFactorConfigKey] = std::to_string(floatScaleFactor);
}
}

namespace ov {
namespace test {

// GNA-specific global configuration hook for ov::test::SubgraphBaseTest.
// Currently a no-op: no extra per-test core settings are applied on this
// path (the scale-factor handling above applies only to the legacy
// LayerTestsCommon flow).
void core_configuration(ov::test::SubgraphBaseTest* test) {}

}  // namespace test
}  // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
//

#include "functional_test_utils/core_config.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
std::shared_ptr<InferenceEngine::Core> core = PluginCache::get().ie();
Expand All @@ -18,3 +19,23 @@ void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
std::map<std::string, std::string> config = {{"INFERENCE_PRECISION_HINT", hint.get_type_name()}};
core->SetConfig(config, CommonTestUtils::DEVICE_GPU);
}

namespace ov {
namespace test {

// GPU-specific global configuration hook for ov::test::SubgraphBaseTest.
// Pins ov::hint::inference_precision so an fp32 model runs in fp32 runtime
// precision regardless of the plugin's default execution precision; a model
// with at least one f16 input keeps f16.
void core_configuration(ov::test::SubgraphBaseTest* test) {
    // Scan the model inputs for an f16 parameter.
    bool has_f16_input = false;
    for (const auto& parameter : test->function->get_parameters()) {
        if (parameter->get_output_element_type(0) == ov::element::f16) {
            has_f16_input = true;
            break;
        }
    }

    const ov::element::Type precision_hint = has_f16_input ? ov::element::f16 : ov::element::f32;
    test->core->set_property(CommonTestUtils::DEVICE_GPU,
                             {{ov::hint::inference_precision.name(), precision_hint.get_type_name()}});
}

}  // namespace test
}  // namespace ov
9 changes: 9 additions & 0 deletions src/plugins/template/tests/functional/core_config.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,15 @@
//

#include "functional_test_utils/core_config.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"

// Legacy (LayerTestsCommon) global configuration hook for the template
// plugin. Currently a no-op: no extra core/plugin settings are applied.
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
}

namespace ov {
namespace test {

// Template-plugin global configuration hook for ov::test::SubgraphBaseTest.
// Currently a no-op: the template plugin needs no extra per-test settings.
void core_configuration(ov::test::SubgraphBaseTest* test) {}

}  // namespace test
}  // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,17 @@

#include "functional_test_utils/core_config.hpp"
#include "common_test_utils/file_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "openvino/util/file_util.hpp"

#include "conformance.hpp"

// NOTE(review): per the surrounding diff view this is the PRE-change
// implementation (shown as removed); the replacement below is an empty stub.
// It registered the conformance target plugin library with the legacy IE core
// when the target device was not already among the available devices.
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
std::shared_ptr<InferenceEngine::Core> core = PluginCache::get().ie();
auto availableDevices = core->GetAvailableDevices();
std::string targetDevice = std::string(ov::test::conformance::targetDevice);
// Register the plugin shared library (with build postfix) only if the
// device is missing, so repeated configuration stays idempotent.
if (std::find(availableDevices.begin(), availableDevices.end(), targetDevice) == availableDevices.end()) {
core->RegisterPlugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
std::string(ov::test::conformance::targetPluginName) + IE_BUILD_POSTFIX),
ov::test::conformance::targetDevice);
}
}
// Legacy global configuration hook, now a no-op: the plugin-registration
// logic previously in this function was removed by this change.
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}

namespace ov {
namespace test {

// Conformance (OP) global configuration hook for ov::test::SubgraphBaseTest.
// Currently a no-op.
void core_configuration(ov::test::SubgraphBaseTest* test) {}

}  // namespace test
}  // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,17 @@

#include "functional_test_utils/core_config.hpp"
#include "common_test_utils/file_utils.hpp"
#include "shared_test_classes/base/ov_subgraph.hpp"
#include "openvino/util/file_util.hpp"

#include "conformance.hpp"

// NOTE(review): per the surrounding diff view this is the PRE-change
// implementation (shown as removed); the replacement below is an empty stub.
// It registered the conformance target plugin library with the legacy IE core
// when the target device was not already among the available devices.
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {
std::shared_ptr<InferenceEngine::Core> core = PluginCache::get().ie();
auto availableDevices = core->GetAvailableDevices();
std::string targetDevice = std::string(ov::test::conformance::targetDevice);
// Register the plugin shared library (with build postfix) only if the
// device is missing, so repeated configuration stays idempotent.
if (std::find(availableDevices.begin(), availableDevices.end(), targetDevice) == availableDevices.end()) {
core->RegisterPlugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
std::string(ov::test::conformance::targetPluginName) + IE_BUILD_POSTFIX),
ov::test::conformance::targetDevice);
}
}
// Legacy global configuration hook, now a no-op: the plugin-registration
// logic previously in this function was removed by this change.
void CoreConfiguration(LayerTestsUtils::LayerTestsCommon* test) {}

namespace ov {
namespace test {

// Conformance (API) global configuration hook for ov::test::SubgraphBaseTest.
// Currently a no-op.
void core_configuration(ov::test::SubgraphBaseTest* test) {}

}  // namespace test
}  // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,8 @@ class SubgraphBaseTest : public CommonTestUtils::TestsCommon {

// Reference computation and plugin execution, overridable by derived tests.
virtual std::vector<ov::Tensor> calculate_refs();
virtual std::vector<ov::Tensor> get_plugin_outputs();

// Grants the per-plugin free function core_configuration() access to this
// class's non-public members (e.g. `configuration`, `core`, `function`) so
// each plugin can supply its own global test configuration.
friend void core_configuration(SubgraphBaseTest* test);
};

inline std::vector<InputShape> static_partial_shapes_to_test_representation(const std::vector<ov::PartialShape>& shapes) {
Expand Down
24 changes: 3 additions & 21 deletions src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,9 @@
#include "ngraph_functions/utils/ngraph_helpers.hpp"

#include "common_test_utils/file_utils.hpp"
#include "functional_test_utils/crash_handler.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "common_test_utils/ov_tensor_utils.hpp"
#include "functional_test_utils/crash_handler.hpp"
#include "functional_test_utils/skip_tests_config.hpp"

#include "shared_test_classes/base/ov_subgraph.hpp"
Expand Down Expand Up @@ -210,26 +211,7 @@ void SubgraphBaseTest::compile_model() {
if (functionRefs == nullptr) {
functionRefs = function->clone();
}

// Within the test scope we don't need any implicit bf16 optimisations, so let's run the network as is.
#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64)
if (targetDevice == CommonTestUtils::DEVICE_CPU && !configuration.count(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16)) {
configuration.insert({InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO});
}
#endif

// Set inference_precision hint to run fp32 model in fp32 runtime precision as default plugin execution precision may vary
if (targetDevice == CommonTestUtils::DEVICE_GPU) {
ov::element::Type hint = ov::element::f32;
for (auto& param : function->get_parameters()) {
if (param->get_output_element_type(0) == ov::element::f16) {
hint = ov::element::f16;
break;
}
}
configuration.insert({ov::hint::inference_precision.name(), hint});
}

core_configuration(this);
compiledModel = core->compile_model(function, targetDevice, configuration);
if (is_report_stages) {
auto end_time = std::chrono::system_clock::now();
Expand Down

0 comments on commit 31b07c4

Please sign in to comment.