From d32f7bef127efc1788dacb6fdb05ca2739187f85 Mon Sep 17 00:00:00 2001
From: Han-Chung Wang
Date: Fri, 12 Aug 2022 03:55:40 +0800
Subject: [PATCH] Add quantized models x86_64 and riscv benchmark suites.
 (#10054)

Fixes https://github.com/iree-org/iree/issues/9871
---
 benchmarks/TFLite/CMakeLists.txt     | 14 ++++++++++++++
 benchmarks/TFLite/linux-riscv.cmake  |  5 +++++
 benchmarks/TFLite/linux-x86_64.cmake | 12 ++++++++++++
 3 files changed, 31 insertions(+)

diff --git a/benchmarks/TFLite/CMakeLists.txt b/benchmarks/TFLite/CMakeLists.txt
index b5a9d163c298..bc115f458b03 100644
--- a/benchmarks/TFLite/CMakeLists.txt
+++ b/benchmarks/TFLite/CMakeLists.txt
@@ -161,6 +161,20 @@ set(PERSON_DETECT_INT8_MODULE
     "1x96x96x1xi8"
 )
 
+set(EFFICIENTNET_INT8_MODULE
+  NAME
+    "EfficientNet"
+  TAGS
+    "int8"
+  SOURCE
+    # Mirror of https://tfhub.dev/tensorflow/lite-model/efficientnet/lite0/int8/2
+    "https://storage.googleapis.com/iree-model-artifacts/efficientnet_lite0_int8_2.tflite"
+  ENTRY_FUNCTION
+    "main"
+  FUNCTION_INPUTS
+    "1x224x224x3xui8"
+)
+
 ################################################################################
 # Add benchmarks for all platforms.                                            #
 ################################################################################

diff --git a/benchmarks/TFLite/linux-riscv.cmake b/benchmarks/TFLite/linux-riscv.cmake
index 3033db70d546..9aacd4cc4ab0 100644
--- a/benchmarks/TFLite/linux-riscv.cmake
+++ b/benchmarks/TFLite/linux-riscv.cmake
@@ -45,6 +45,9 @@ iree_benchmark_suite(
     "${DEEPLABV3_FP32_MODULE}"
     "${MOBILEBERT_FP32_MODULE}"
     "${MOBILENET_V1_MODULE}"
+    "${MOBILEBERT_INT8_MODULE}"
+    "${PERSON_DETECT_INT8_MODULE}"
+    "${EFFICIENTNET_INT8_MODULE}"
 
   BENCHMARK_MODES
     "full-inference,default-flags"
@@ -70,7 +73,9 @@ iree_benchmark_suite(
     "linux-riscv"
 
   MODULES
+    "${MOBILEBERT_INT8_MODULE}"
     "${PERSON_DETECT_INT8_MODULE}"
+    "${EFFICIENTNET_INT8_MODULE}"
 
   BENCHMARK_MODES
     "full-inference,default-flags"

diff --git a/benchmarks/TFLite/linux-x86_64.cmake b/benchmarks/TFLite/linux-x86_64.cmake
index 2f8d6ffe3626..fa8d64cfdd34 100644
--- a/benchmarks/TFLite/linux-x86_64.cmake
+++ b/benchmarks/TFLite/linux-x86_64.cmake
@@ -34,6 +34,9 @@ iree_benchmark_suite(
     "${MOBILEBERT_FP32_MODULE}"
     "${MOBILENET_V2_MODULE}"
     "${MOBILENET_V3SMALL_MODULE}"
+    "${MOBILEBERT_INT8_MODULE}"
+    "${PERSON_DETECT_INT8_MODULE}"
+    "${EFFICIENTNET_INT8_MODULE}"
 
   BENCHMARK_MODES
     "full-inference,default-flags"
@@ -63,6 +66,9 @@ iree_benchmark_suite(
     "${MOBILEBERT_FP32_MODULE}"
     "${MOBILENET_V2_MODULE}"
     "${MOBILENET_V3SMALL_MODULE}"
+    "${MOBILEBERT_INT8_MODULE}"
+    "${PERSON_DETECT_INT8_MODULE}"
+    "${EFFICIENTNET_INT8_MODULE}"
 
   BENCHMARK_MODES
     "1-thread,full-inference,default-flags"
@@ -94,6 +100,9 @@ iree_benchmark_suite(
     "${MOBILEBERT_FP32_MODULE}"
     "${MOBILENET_V2_MODULE}"
     "${MOBILENET_V3SMALL_MODULE}"
+    "${MOBILEBERT_INT8_MODULE}"
+    "${PERSON_DETECT_INT8_MODULE}"
+    "${EFFICIENTNET_INT8_MODULE}"
 
   BENCHMARK_MODES
     "4-thread,full-inference,default-flags"
@@ -125,6 +134,9 @@ iree_benchmark_suite(
     "${MOBILEBERT_FP32_MODULE}"
     "${MOBILENET_V2_MODULE}"
     "${MOBILENET_V3SMALL_MODULE}"
+    "${MOBILEBERT_INT8_MODULE}"
+    "${PERSON_DETECT_INT8_MODULE}"
+    "${EFFICIENTNET_INT8_MODULE}"
 
   BENCHMARK_MODES
     "8-thread,full-inference,default-flags"