e2e matmul test improvements (iree-org#18725)
This PR is made of individual commits for review convenience and so we
can drop anything that causes problems on CI.

* Add default shapes set, combining small and large.
  * Specifying "small" or "large" is genuinely needed in only a minority of cases. That's a change from when these tests were first added.
* Enable dynamic sizes in large shapes, leaving only gpu_large_aligned out.
  * Who remembered that large shapes weren't tested as dynamic shapes, unlike small shapes... and unlike "gpu_large" shapes?!
* Rename gpu_large_aligned -> easy_large_static.
  * This is only needed in sketchy GPU codegen pipelines that can't deal with sizes that aren't multiples of some internal tile size.
* Fold gpu_large into large and tolerate fuzzy bf16 accumulators.
  * This retains the evidently more curated set of shapes from "gpu_large". The larger sizes ran into new issues with the mostly artificial case of bf16 accumulators.
* Use default shapes and reenable sanitizers.
  * This simplifies the build, reduces the number of targets, and increases coverage, as "default" combines small and large shapes. It also reenables sanitizers that had been disabled on large sizes due to timeouts. Since tests at some point started verifying only a subset of result matrix elements, the timeouts should be avoided now (see the verification sketch below).
* Enable default shapes for most rocm tests.
  * The motivation for this PR. The rest just bubbled up from there.
* Make large shapes more diverse (including odd and rectangular kinds of shapes), as in the shape-set sketch below.
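
To make the shape-set reorganization concrete, here is a minimal sketch of how a test generator could define these sets. This is an illustration under assumptions, not the actual generate_e2e_matmul_tests.py code: the TestShape class, its fields, and the example dimensions are hypothetical; only the set names (small, large, default, easy_large_static) come from this PR.

```python
# Hypothetical sketch -- not the real generate_e2e_matmul_tests.py.
from dataclasses import dataclass

@dataclass(frozen=True)
class TestShape:
    m: int
    k: int
    n: int
    dynamic: bool  # whether the dims are compiled as dynamic rather than static

# Small shapes have always been tested as dynamic; large shapes now are too,
# and include odd and rectangular sizes for diversity.
SMALL = [TestShape(1, 1, 1, dynamic=True), TestShape(8, 8, 8, dynamic=True)]
LARGE = [TestShape(457, 330, 512, dynamic=True),
         TestShape(1024, 1024, 1024, dynamic=True)]
# Static-only "easy" sizes (multiples of typical internal tile sizes), kept
# for GPU codegen pipelines that can't handle unaligned sizes.
EASY_LARGE_STATIC = [TestShape(512, 128, 512, dynamic=False)]

SHAPE_SETS = {
    "small": SMALL,
    "large": LARGE,
    "default": SMALL + LARGE,  # the new default: union of small and large
    "easy_large_static": EASY_LARGE_STATIC,
}
```

A BUILD rule that omits --shapes= would then get "default", which is why most targets in the diff below no longer pass that flag.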
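And here, under the same caveat, is a rough sketch of the subset-verification idea that makes large shapes affordable under sanitizers; the function name and sampling scheme are assumptions, not the actual test runner's implementation.

```python
# Hypothetical sketch of subset verification -- not the actual runner code.
import numpy as np

def check_matmul_subset(lhs, rhs, result, num_samples=256, seed=0):
    """Spot-check `result` against a reference matmul on sampled elements."""
    rng = np.random.default_rng(seed)
    m, n = result.shape
    rows = rng.integers(0, m, size=num_samples)
    cols = rng.integers(0, n, size=num_samples)
    for i, j in zip(rows, cols):
        # Recompute a single output element as a dot product of one LHS row
        # with one RHS column, then compare with a fuzzy tolerance (needed
        # for low-precision accumulators such as bf16).
        expected = np.dot(lhs[i, :], rhs[:, j])
        np.testing.assert_allclose(result[i, j], expected, rtol=1e-3)
```

Spot-checking a fixed number of output elements costs O(samples * k) work instead of the O(m * n * k) of a full reference matmul, which is what keeps sanitizer-instrumented runs from timing out.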

---------

Signed-off-by: Benoit Jacob <[email protected]>
bjacob authored Oct 9, 2024
1 parent 5270093 commit eb15493
Showing 4 changed files with 202 additions and 1,145 deletions.
68 changes: 17 additions & 51 deletions tests/e2e/matmul/BUILD.bazel
@@ -26,7 +26,7 @@ py_binary(

# LLVMCPU, non-data-tiling, no microkernels
[iree_generated_e2e_runner_test(
name = "e2e_matmul_cpu_nondt_%s_%s_%s" % (lhs_rhs_type, acc_type, size),
name = "e2e_matmul_cpu_nondt_%s_%s" % (lhs_rhs_type, acc_type),
compiler_flags = [
"--iree-opt-data-tiling=false",
"--iree-llvmcpu-enable-ukernels=none",
@@ -37,7 +37,6 @@ py_binary(
generator_args = [
"--lhs_rhs_type=%s" % lhs_rhs_type,
"--acc_type=%s" % acc_type,
"--shapes=%s" % size,
],
tags = [
# f16/bf16 trigger internal LLVM assertion errors on riscv and wasm.
@@ -60,9 +59,6 @@ py_binary(
# TODO(#15258): enable bf16 tests when that bug is fixed.
# ("bf16", "bf16"),
# ("bf16", "f32"),
-] for size in [
-"small",
-"large",
]]

PREPROCESSING_TRANSPOSE_LHS = "--iree-preprocessing-pass-pipeline=builtin.module\\(util.func\\(iree-preprocessing-transpose-matmul-pass{input=lhs}\\)\\)"
@@ -71,9 +67,8 @@ PREPROCESSING_PEEL = "--iree-llvmcpu-vector-pproc-strategy=peel"

# LLVMCPU, non-data-tiling, no microkernels, ArmSME
[iree_generated_e2e_runner_test(
name = "e2e_matmul_cpu_arm_sme_nondt_%s_%s%s%s" % (
name = "e2e_matmul_cpu_arm_sme_nondt_%s_%s%s" % (
dtype,
-size,
"_transpose_lhs" if transpose_lhs else "",
"_peel" if peel else "",
),
@@ -87,7 +82,6 @@ PREPROCESSING_PEEL = "--iree-llvmcpu-vector-pproc-strategy=peel"
generator_args = [
"--lhs_rhs_type=%s" % dtype,
"--acc_type=%s" % dtype,
"--shapes=%s" % size,
],
tags = [
"requires-arm-sme",
@@ -101,9 +95,6 @@ PREPROCESSING_PEEL = "--iree-llvmcpu-vector-pproc-strategy=peel"
) for dtype in [
"f32",
# "f64" (also supported for ArmSME, but not by the test generator)
-] for size in [
-"small",
-"large",
] for transpose_lhs in [
True,
False,
@@ -137,11 +128,10 @@ X86_64_AVX512_BF16 = X86_64_AVX512 + [

# LLVMCPU, data-tiling, data-tiling + ukernels.
[iree_generated_e2e_runner_test(
name = "e2e_matmul_cpu_dt%s_%s_%s_%s" % (
name = "e2e_matmul_cpu_dt%s_%s_%s" % (
("_uk" if use_uk else ""),
lhs_rhs_type,
acc_type,
-size,
),
compiler_flags = [
"--iree-opt-data-tiling",
@@ -150,14 +140,8 @@ X86_64_AVX512_BF16 = X86_64_AVX512 + [
generator_args = [
"--lhs_rhs_type=%s" % lhs_rhs_type,
"--acc_type=%s" % acc_type,
"--shapes=%s" % size,
],
tags = ([
# "--shapes=large" can cause timeouts on sanitizers.
"noasan",
"notsan",
] if size == "large" else []) + ([
# "--shapes=large" can cause timeouts on RISC-V emulator.
# f16/bf16 trigger internal LLVM assertion errors on riscv and wasm.
"noriscv",
"nowasm",
@@ -206,18 +190,14 @@ X86_64_AVX512_BF16 = X86_64_AVX512 + [
("bf16", "bf16"),
("bf16", "f32"),
]
-) for size in [
-"small",
-"large",
-]]
+)]

# LLVMCPU, data-tiling, data-tiling + ukernels + late materialization.
[iree_generated_e2e_runner_test(
name = "e2e_matmul_cpu_experimental_dt%s_%s_%s_%s" % (
name = "e2e_matmul_cpu_experimental_dt%s_%s_%s" % (
("_uk" if use_uk else ""),
lhs_rhs_type,
acc_type,
-size,
),
compiler_flags = [
"--iree-opt-data-tiling",
Expand All @@ -227,14 +207,8 @@ X86_64_AVX512_BF16 = X86_64_AVX512 + [
generator_args = [
"--lhs_rhs_type=%s" % lhs_rhs_type,
"--acc_type=%s" % acc_type,
"--shapes=%s" % size,
],
tags = ([
# "--shapes=large" can cause timeouts on sanitizers.
"noasan",
"notsan",
] if size == "large" else []) + ([
# "--shapes=large" can cause timeouts on RISC-V emulator.
# f16/bf16 trigger internal LLVM assertion errors on riscv and wasm.
"noriscv",
"nowasm",
@@ -283,10 +257,7 @@ X86_64_AVX512_BF16 = X86_64_AVX512 + [
("bf16", "bf16"),
("bf16", "f32"),
]
-) for size in [
-"small",
-"large",
-]]
+)]

[iree_generated_e2e_runner_test(
name = "e2e_matmul_vmvx_experimental_dt%s_%s_%s" % (
@@ -412,7 +383,7 @@ iree_generated_e2e_runner_test(
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=f32",
"--shapes=gpu_large_aligned",
"--shapes=easy_large_static",
"--compilation_info=LLVMGPUMatmulSimt",
],
tags = [
@@ -440,7 +411,7 @@ iree_generated_e2e_runner_test(
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=f32",
"--shapes=gpu_large_aligned",
"--shapes=easy_large_static",
"--compilation_info=LLVMGPUMatmulTensorCore",
],
tags = [
@@ -459,14 +430,13 @@ iree_generated_e2e_runner_test(
)

iree_generated_e2e_runner_test(
name = "e2e_matmul_cuda_f32_large_unaligned",
name = "e2e_matmul_cuda_f32",
compiler_flags = [
"--iree-cuda-target=sm_80",
],
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=f32",
"--shapes=gpu_large",
],
tags = [
# CUDA cuInit fails with sanitizer on.
Expand All @@ -484,14 +454,13 @@ iree_generated_e2e_runner_test(
)

iree_generated_e2e_runner_test(
name = "e2e_matmul_cuda_f16_large_unaligned",
name = "e2e_matmul_cuda_f16",
compiler_flags = [
"--iree-cuda-target=sm_80",
],
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=f16",
"--shapes=gpu_large",
],
tags = [
# CUDA cuInit fails with sanitizer on.
@@ -517,7 +486,7 @@ iree_generated_e2e_runner_test(
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=f32",
"--shapes=gpu_large_aligned",
"--shapes=easy_large_static",
"--compilation_info=LLVMGPUMatmulTensorCoreMmaSync",
],
tags = [
@@ -544,7 +513,7 @@ iree_generated_e2e_runner_test(
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=f16",
"--shapes=gpu_large_aligned",
"--shapes=easy_large_static",
"--compilation_info=LLVMGPUMatmulTensorCore",
],
tags = [
@@ -571,7 +540,7 @@ iree_generated_e2e_runner_test(
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=f16",
"--shapes=gpu_large_aligned",
"--shapes=easy_large_static",
"--compilation_info=LLVMGPUMatmulTensorCoreMmaSync",
],
tags = [
@@ -590,14 +559,13 @@ iree_generated_e2e_runner_test(
)

[iree_generated_e2e_runner_test(
name = "e2e_matmul_cuda_%s_large_splitk" % lhs_rhs_type,
name = "e2e_matmul_cuda_%s_splitk" % lhs_rhs_type,
compiler_flags = [
"--iree-dispatch-creation-split-matmul-reduction=4",
],
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=%s" % lhs_rhs_type,
"--shapes=large",
],
tags = [
# CUDA cuInit fails with sanitizer on.
Expand All @@ -606,8 +574,6 @@ iree_generated_e2e_runner_test(
"notsan",
"noubsan",
"requires-gpu-nvidia",
# "--shapes=large" can cause timeouts on riscv emulator.
"noriscv",
],
target_backends_and_drivers = [
("cuda", "cuda"),
@@ -632,7 +598,7 @@ iree_generated_e2e_runner_test(
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=%s" % lhs_rhs_type,
"--shapes=gpu_large_aligned",
"--shapes=easy_large_static",
"--compilation_info=SPIRVVectorizeMali",
],
tags = [
@@ -659,7 +625,7 @@ iree_generated_e2e_runner_test(
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=%s" % lhs_rhs_type,
"--shapes=gpu_large_aligned",
"--shapes=easy_large_static",
"--compilation_info=SPIRVVectorizeNVIDIA",
],
tags = [
@@ -685,7 +651,7 @@ iree_generated_e2e_runner_test(
generator = ":generate_e2e_matmul_tests",
generator_args = [
"--lhs_rhs_type=f16",
"--shapes=gpu_large_aligned",
"--shapes=easy_large_static",
"--compilation_info=SPIRVCooperativeMatrixVectorize",
],
runner_args = [