Skip to content

Commit

Permalink
Compilation with conda build (openvinotoolkit#13083)
Browse files Browse the repository at this point in the history
* Compilation with conda-forge

* Fixed compilation with gcc 12

* Fixed test compilation

* Use macro instead of function

* Fixed uninitialized variable in tests

* Conda

* Conda
  • Loading branch information
ilya-lavrenov authored Oct 5, 2022
1 parent e154562 commit 1bb15b1
Show file tree
Hide file tree
Showing 33 changed files with 146 additions and 64 deletions.
50 changes: 25 additions & 25 deletions cmake/developer_package/compile_flags/os_flags.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -69,94 +69,94 @@ endmacro()
#
# Provides SSE4.2 compilation flags depending on an OS and a compiler
#
function(ie_sse42_optimization_flags flags)
macro(ie_sse42_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# No such option for MSVC 2019
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} /QxSSE4.2 PARENT_SCOPE)
set(${flags} /QxSSE4.2)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} -xSSE4.2 PARENT_SCOPE)
set(${flags} -xSSE4.2)
else()
set(${flags} -msse4.2 PARENT_SCOPE)
set(${flags} -msse4.2)
endif()
endif()
endfunction()
endmacro()

#
# Provides AVX2 compilation flags depending on an OS and a compiler
#
function(ie_avx2_optimization_flags flags)
macro(ie_avx2_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} /QxCORE-AVX2 PARENT_SCOPE)
set(${flags} /QxCORE-AVX2)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(${flags} /arch:AVX2 PARENT_SCOPE)
set(${flags} /arch:AVX2)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} -xCORE-AVX2 PARENT_SCOPE)
set(${flags} -xCORE-AVX2)
else()
set(${flags} -mavx2 -mfma PARENT_SCOPE)
set(${flags} -mavx2 -mfma)
endif()
endif()
endfunction()
endmacro()

#
# Provides common AVX512 compilation flags for AVX512F instruction set support
# depending on an OS and a compiler
#
function(ie_avx512_optimization_flags flags)
macro(ie_avx512_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} /QxCOMMON-AVX512 PARENT_SCOPE)
set(${flags} /QxCOMMON-AVX512)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(${flags} /arch:AVX512 PARENT_SCOPE)
set(${flags} /arch:AVX512)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} -xCOMMON-AVX512 PARENT_SCOPE)
set(${flags} -xCOMMON-AVX512)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(${flags} -mavx512f -mfma PARENT_SCOPE)
set(${flags} -mavx512f -mfma)
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES "^(Clang|AppleClang)$")
set(${flags} -mavx512f -mfma PARENT_SCOPE)
set(${flags} -mavx512f -mfma)
endif()
endif()
endfunction()
endmacro()

function(ie_arm_neon_optimization_flags flags)
macro(ie_arm_neon_optimization_flags flags)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# nothing
elseif(ANDROID)
if(ANDROID_ABI STREQUAL "arm64-v8a")
set(${flags} -mfpu=neon PARENT_SCOPE)
set(${flags} -mfpu=neon)
elseif(ANDROID_ABI STREQUAL "armeabi-v7a-hard with NEON")
set(${flags} -march=armv7-a -mfloat-abi=hard -mhard-float -D_NDK_MATH_NO_SOFTFP=1 -mfpu=neon PARENT_SCOPE)
set(${flags} -march=armv7-a -mfloat-abi=hard -mhard-float -D_NDK_MATH_NO_SOFTFP=1 -mfpu=neon)
elseif((ANDROID_ABI STREQUAL "armeabi-v7a with NEON") OR
(ANDROID_ABI STREQUAL "armeabi-v7a" AND
DEFINED CMAKE_ANDROID_ARM_NEON AND CMAKE_ANDROID_ARM_NEON))
set(${flags} -march=armv7-a -mfloat-abi=softfp -mfpu=neon PARENT_SCOPE)
set(${flags} -march=armv7-a -mfloat-abi=softfp -mfpu=neon)
endif()
else()
if(AARCH64)
set(${flags} -O2 -ftree-vectorize PARENT_SCOPE)
set(${flags} -O2 -ftree-vectorize)
elseif(ARM)
set(${flags} -mfpu=neon PARENT_SCOPE)
set(${flags} -mfpu=neon)
endif()
endif()
endfunction()
endmacro()

#
# Disables all warnings for 3rd party targets
Expand Down
6 changes: 4 additions & 2 deletions cmake/developer_package/target_flags.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,10 @@ endif()

if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
set(OV_COMPILER_IS_CLANG ON)
else()
set(OV_COMPILER_IS_CLANG OFF)
endif()

if(CMAKE_CXX_COMPILER MATCHES ".*conda.*")
set(OV_COMPILER_IS_CONDA ON)
endif()

get_property(OV_GENERATOR_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
7 changes: 4 additions & 3 deletions samples/cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -109,9 +109,10 @@ endif()
####################################

if(NOT TARGET gflags)
if(NOT APPLE)
# on Apple only dynamic libraries are available
# also, we can easily mix arm64 and x86_64 binaries when cross-compile for Intel CPU
if(NOT APPLE AND NOT DEFINED ENV{CONDA_PREFIX})
# - conda-forge contains only dynamic libraries
# - on Apple only dynamic libraries are available
# also, we can easily mix arm64 and x86_64 binaries when cross-compile for Intel CPU
find_package(gflags QUIET COMPONENTS nothreads_static)
endif()

Expand Down
15 changes: 9 additions & 6 deletions src/common/preprocessing/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -11,23 +11,26 @@ file(GLOB LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
# Provides compilation flags for AVX512F, AVX512BW, AVX512DQ
# instructions support depending on an OS and a compiler
#
function(ie_avx512_core_optimization_flags flags)
macro(ie_avx512_core_optimization_flags flags)
if(WIN32)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} /QxCORE-AVX512 PARENT_SCOPE)
set(${flags} /QxCORE-AVX512)
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(${flags} /arch:AVX512 PARENT_SCOPE)
set(${flags} /arch:AVX512)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(${flags} -xCORE-AVX512 PARENT_SCOPE)
set(${flags} -xCORE-AVX512)
else()
set(${flags} -mavx512f -mavx512bw -mavx512dq -mfma PARENT_SCOPE)
set(${flags} -mavx512f -mavx512bw -mavx512dq -mfma)
if(OV_COMPILER_IS_CONDA)
list(APPEND ${flags} -Wno-error=maybe-uninitialized -Wno-maybe-uninitialized)
endif()
endif()
endif()
endfunction()
endmacro()

if(ENABLE_SSE42)
file(GLOB SSE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/cpu_x86_sse42/*.cpp)
Expand Down
4 changes: 4 additions & 0 deletions src/plugins/intel_cpu/thirdparty/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,10 @@ function(ie_add_onednn)
if(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
ie_add_compiler_flags(-Wno-undef)
ie_add_compiler_flags(-Wno-missing-declarations)
if(OV_COMPILER_IS_CONDA)
ie_add_compiler_flags(-Wno-error=array-bounds)
ie_add_compiler_flags(-Wno-error=stringop-overflow=)
endif()
elseif(UNIX AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -diag-disable=10121")
endif()
Expand Down
19 changes: 10 additions & 9 deletions src/plugins/intel_gna/cmake/libGNAConfig.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -21,28 +21,29 @@ endforeach()
set(libGNA_LIBRARIES_BASE_PATH ${GNA_PATH} CACHE STRING "" FORCE)

if(libGNA_FIND_REQUIRED_KERNEL)
find_library(GNA_KERNEL_LIBRARY
${GNA_KERNEL_LIB_NAME}
HINTS
${libGNA_LIBRARIES_BASE_PATH})
find_library(GNA_KERNEL_LIBRARY ${GNA_KERNEL_LIB_NAME}
HINTS ${libGNA_LIBRARIES_BASE_PATH}
NO_CMAKE_FIND_ROOT_PATH)

if(NOT GNA_KERNEL_LIBRARY MATCHES NOTFOUND)
if(GNA_KERNEL_LIBRARY)
add_library(libGNA::KERNEL SHARED IMPORTED)
set_target_properties(libGNA::KERNEL PROPERTIES IMPORTED_LOCATION ${GNA_KERNEL_LIBRARY})
set(libGNA_KERNEL_FOUND TRUE)
else()
message(SEND_ERROR "GNA KERNEL library was not found in ${libGNA_LIBRARIES_BASE_PATH}")
message(SEND_ERROR "GNA KERNEL library (${GNA_KERNEL_LIB_NAME}) was not found in ${libGNA_LIBRARIES_BASE_PATH}")
endif()
endif()

if(libGNA_FIND_REQUIRED_API)
find_path(libGNA_INCLUDE_DIRS gna2-api.h PATHS "${GNA_EXT_DIR}/include")
if(NOT libGNA_INCLUDE_DIRS MATCHES NOTFOUND)
find_path(libGNA_INCLUDE_DIRS gna2-api.h
PATHS "${GNA_EXT_DIR}/include"
NO_CMAKE_FIND_ROOT_PATH)
if(libGNA_INCLUDE_DIRS)
add_library(libGNA::API INTERFACE IMPORTED)
set_target_properties(libGNA::API PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${libGNA_INCLUDE_DIRS})
set(libGNA_API_FOUND TRUE)
else()
message(SEND_ERROR "GNA API headers was not found in ${GNA_EXT_DIR}/include")
message(SEND_ERROR "GNA API headers (gna2-api.h) was not found in ${GNA_EXT_DIR}/include")
endif()
endif()

Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/border.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,8 @@ std::vector<layout> border_inst::calc_output_layouts(border_node const& /*node*/
return { layout{output_shapes[0], output_type, output_format} };
}

template std::vector<layout> border_inst::calc_output_layouts<ov::PartialShape>(border_node const& node, const kernel_impl_params& impl_param);

std::string border_inst::to_string(border_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/broadcast.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,8 @@ std::vector<layout> broadcast_inst::calc_output_layouts(broadcast_node const& /*
return { layout{output_shapes[0], output_type, output_format} };
}

template std::vector<layout> broadcast_inst::calc_output_layouts<ov::PartialShape>(broadcast_node const& node, const kernel_impl_params& impl_param);

std::string broadcast_inst::to_string(broadcast_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/concatenation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,8 @@ std::vector<layout> concatenation_inst::calc_output_layouts(const concatenation_
return { layout {output_shape, output_dt, output_format} };
}

template std::vector<layout> concatenation_inst::calc_output_layouts<ov::PartialShape>(concatenation_node const& node, const kernel_impl_params& impl_param);

std::string concatenation_inst::to_string(concatenation_node const& node) {
auto node_info = node.desc_to_json();
auto desc = node.get_primitive();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/eltwise.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,8 @@ std::vector<layout> eltwise_inst::calc_output_layouts(eltwise_node const& /*node
return { output_layout };
}

template std::vector<layout> eltwise_inst::calc_output_layouts<ov::PartialShape>(eltwise_node const& node, const kernel_impl_params& impl_param);

static inline std::string stringify_vector(const std::vector<float>& v) {
std::stringstream s;

Expand Down
3 changes: 3 additions & 0 deletions src/plugins/intel_gpu/src/graph/fully_connected.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,9 @@ std::vector<layout> fully_connected_inst::calc_output_layouts(fully_connected_no
return { layout{output_shapes[0], output_type, output_format} };
}

template std::vector<layout> fully_connected_inst::calc_output_layouts<ov::PartialShape>(fully_connected_node const& node,
const kernel_impl_params& impl_param);

std::string fully_connected_inst::to_string(fully_connected_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/gather.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,8 @@ std::vector<layout> gather_inst::calc_output_layouts(gather_node const& /*node*/
return { layout{output_shapes[0], output_type, output_format} };
}

template std::vector<layout> gather_inst::calc_output_layouts<ov::PartialShape>(gather_node const& node, const kernel_impl_params& impl_param);

std::string gather_inst::to_string(gather_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
3 changes: 3 additions & 0 deletions src/plugins/intel_gpu/src/graph/gather_elements.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,9 @@ std::vector<layout> gather_elements_inst::calc_output_layouts(gather_elements_no
return { layout{output_shapes[0], output_type, output_format} };
}

template std::vector<layout> gather_elements_inst::calc_output_layouts<ov::PartialShape>(gather_elements_node const& node,
const kernel_impl_params& impl_param);

std::string gather_elements_inst::to_string(gather_elements_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/gemm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,8 @@ std::vector<layout> gemm_inst::calc_output_layouts(gemm_node const& /*node*/, co
return { layout{output_shapes[0], output_type, input0_layout.format, prim->output_padding} };
}

template std::vector<layout> gemm_inst::calc_output_layouts<ov::PartialShape>(gemm_node const& node, const kernel_impl_params& impl_param);

std::string gemm_inst::to_string(gemm_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/mvn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ std::vector<layout> mvn_inst::calc_output_layouts(mvn_node const& /*node*/, cons
return { layout{output_shape, output_type, output_format} };
}

template std::vector<layout> mvn_inst::calc_output_layouts<ov::PartialShape>(mvn_node const& node, const kernel_impl_params& impl_param);

std::string mvn_inst::to_string(mvn_node const& node) {
auto node_info = node.desc_to_json();
auto desc = node.get_primitive();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/one_hot.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,8 @@ std::vector<layout> one_hot_inst::calc_output_layouts(const one_hot_node& /*node
return {{output_shapes[0], dt, format::get_default_format(output_shapes[0].size())}};
}

template std::vector<layout> one_hot_inst::calc_output_layouts<ov::PartialShape>(one_hot_node const& node, const kernel_impl_params& impl_param);

std::string one_hot_inst::to_string(one_hot_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/permute.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,8 @@ std::vector<layout> permute_inst::calc_output_layouts(permute_node const& /*node
return { layout{output_shape, output_type, input_layout.format, desc->output_padding} };
}

template std::vector<layout> permute_inst::calc_output_layouts<ov::PartialShape>(permute_node const& node, const kernel_impl_params& impl_param);

std::string permute_inst::to_string(permute_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/quantize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,8 @@ std::vector<layout> quantize_inst::calc_output_layouts(quantize_node const&, ker
return { layout{input_layout.get<ShapeType>(), out_dt, output_format} };
}

template std::vector<layout> quantize_inst::calc_output_layouts<ov::PartialShape>(quantize_node const& node, const kernel_impl_params& impl_param);

std::string quantize_inst::to_string(quantize_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/reduce.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,8 @@ std::vector<layout> reduce_inst::calc_output_layouts(reduce_node const& /*node*/
return { layout{output_shapes[0], output_type, output_format} };
}

template std::vector<layout> reduce_inst::calc_output_layouts<ov::PartialShape>(reduce_node const& node, const kernel_impl_params& impl_param);

std::string reduce_inst::to_string(reduce_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/resample.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,8 @@ std::vector<layout> resample_inst::calc_output_layouts(resample_node const& /*no
return { layout{output_shapes[0], input_layout.data_type, format::adjust_to_rank(input_layout.format, output_shapes[0].size())} };
}

template std::vector<layout> resample_inst::calc_output_layouts<ov::PartialShape>(resample_node const& node, const kernel_impl_params& impl_param);

std::string resample_inst::to_string(resample_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/reshape.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -117,6 +117,8 @@ std::vector<layout> reshape_inst::calc_output_layouts(reshape_node const& /*node
return { layout{output_shapes[0], input_layout.data_type, format::adjust_to_rank(input_layout.format, output_shapes[0].size())} };
}

template std::vector<layout> reshape_inst::calc_output_layouts<ov::PartialShape>(reshape_node const& node, const kernel_impl_params& impl_param);

std::string reshape_inst::to_string(reshape_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/scatter_update.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,8 @@ std::vector<layout> scatter_update_inst::calc_output_layouts(scatter_update_node
return { layout{output_shape, output_type, output_format} };
}

template std::vector<layout> scatter_update_inst::calc_output_layouts<ov::PartialShape>(scatter_update_node const& node, const kernel_impl_params& impl_param);

std::string scatter_update_inst::to_string(scatter_update_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/shape_of.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,8 @@ std::vector<layout> shape_of_inst::calc_output_layouts(shape_of_node const& /*no
return { layout{output_shape, output_dt, format::bfyx} };
}

template std::vector<layout> shape_of_inst::calc_output_layouts<ov::PartialShape>(shape_of_node const& node, const kernel_impl_params& impl_param);

std::string shape_of_inst::to_string(shape_of_node const& node) {
auto node_info = node.desc_to_json();
auto desc = node.get_primitive();
Expand Down
2 changes: 2 additions & 0 deletions src/plugins/intel_gpu/src/graph/strided_slice.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,8 @@ std::vector<layout> strided_slice_inst::calc_output_layouts(strided_slice_node c
return { layout{output_shapes[0], input0_layout.data_type, output_format} };
}

template std::vector<layout> strided_slice_inst::calc_output_layouts<ov::PartialShape>(strided_slice_node const& node, const kernel_impl_params& impl_param);

std::string strided_slice_inst::to_string(strided_slice_node const& node) {
auto desc = node.get_primitive();
auto node_info = node.desc_to_json();
Expand Down
Loading

0 comments on commit 1bb15b1

Please sign in to comment.