Skip to content

Commit

Permalink
Reactivate transform dialect tests disabled in iree-org#9489. (iree-org#9510)
Browse files Browse the repository at this point in the history
  • Loading branch information
nicolasvasilache authored Jun 15, 2022
1 parent 5d17828 commit 0d66300
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 14 deletions.
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
// TODO(#9510): Enable the test.
// XFAIL:*
// RUN: iree-dialects-opt --transform-dialect-interpreter %s | FileCheck %s

func.func @fun_to_benchmark(%arg0: tensor<128x128xf32>, %arg1: tensor<128x128xf32>, %arg2: tensor<128x128xf32>) ->
Expand All @@ -26,11 +25,13 @@ transform.with_pdl_patterns {
%0 = pdl_match @isa_linalg.matmul in %arg1
%tiled_linalg_op, %loops:3 = transform.structured.tile %0 {interchange = [1, 0, 2], sizes = [6, 16, 32]}
%1 = transform.loop.peel %loops#0

%tiled_and_peeled_linalg_op = pdl_match @isa_linalg.matmul in %1
// This test checks the proper handling of the scalarize dims attribute.
// The first dimension does not divide but we can always scalarize a `?` into `1`
// and enable vectorization of a lower-rank op this way.
%tiled_linalg_op_0 = transform.structured.scalarize %tiled_linalg_op
%parent = transform.get_closest_isolated_parent %tiled_linalg_op_0
%tiled_and_peeled_linalg_op_0 = transform.structured.scalarize %tiled_and_peeled_linalg_op
%parent = transform.get_closest_isolated_parent %tiled_and_peeled_linalg_op_0
transform.structured.vectorize %parent {vectorize_padding = false}
}
}
Original file line number Diff line number Diff line change
@@ -1,23 +1,22 @@
// TODO(#9510): Enable the test.
// XFAIL:*
// RUN: iree-dialects-opt --transform-dialect-interpreter %s | FileCheck %s

// CHECK-LABEL: func.func @matmul_tensors(
func.func @matmul_tensors(
%arg0: tensor<126x127xf32>, %arg1: tensor<127x128xf32>, %arg2: tensor<126x128xf32> { linalg.inplaceable = true})
-> tensor<126x128xf32> {
// CHECK-DAG: %[[c4:.*]] = arith.constant 4 : index
// CHECK-DAG: %[[c124:.*]] = arith.constant 124 : index
// CHECK-DAG: %[[c127:.*]] = arith.constant 127 : index
// CHECK-DAG: %[[c128:.*]] = arith.constant 128 : index

// CHECK: scf.for {{.*}} to %[[c124]]
// CHECK: scf.for {{.*}} to %[[c128]]
// CHECK: scf.for {{.*}} to %[[c124]]
// CHECK: scf.for {{.*}} to %[[c124]] step %[[c4]]
// CHECK: scf.for {{.*}} to %[[c128]] step %[[c4]]
// CHECK: scf.for {{.*}} to %[[c124]] step %[[c4]]
// CHECK: linalg.matmul ins({{.*}} : tensor<4x4xf32>, tensor<4x4xf32>) outs({{.*}} : tensor<4x4xf32>) -> tensor<4x4xf32>
// CHECK: linalg.matmul ins({{.*}} : tensor<4x3xf32>, tensor<3x4xf32>) outs({{.*}} : tensor<4x4xf32>) -> tensor<4x4xf32>
// CHECK: scf.for {{.*}} to %[[c128]]
// CHECK: scf.for {{.*}} to %[[c127]]
// CHECK: linalg.matmul ins({{.*}} : tensor<2x?xf32>, tensor<?x4xf32>) outs({{.*}} : tensor<2x4xf32>) -> tensor<2x4xf32>
// CHECK: scf.for {{.*}} to %[[c128]] step %[[c4]]
// CHECK: scf.for {{.*}} to %[[c124]] step %[[c4]]
// CHECK: linalg.matmul ins({{.*}} : tensor<2x4xf32>, tensor<4x4xf32>) outs({{.*}} : tensor<2x4xf32>) -> tensor<2x4xf32>
// CHECK: linalg.matmul ins({{.*}} : tensor<2x3xf32>, tensor<3x4xf32>) outs({{.*}} : tensor<2x4xf32>) -> tensor<2x4xf32>
%0 = linalg.matmul ins(%arg0, %arg1: tensor<126x127xf32>, tensor<127x128xf32>)
outs(%arg2: tensor<126x128xf32>)
-> tensor<126x128xf32>
Expand All @@ -44,9 +43,13 @@ transform.with_pdl_patterns {
%linalg_op, %loops:3 = transform.structured.tile %0 {sizes = [4, 4, 4]}

// Note: The order in which the loops are peeled is important. If %loop#2 is
// peeled first, the partial iteration of %loop#0 will also contain a peeled
// peeled first, the partial iteration of %loop#0 also contains a peeled
// version of %loop#2.
transform.loop.peel %loops#0
// Peeling #0 first is currently not possible as it will invalidate all the
// nested handles.
// TODO: extra arguments to specify parts of IR that should not be
// invalidated when we know that the transform updates in-place.
transform.loop.peel %loops#2
transform.loop.peel %loops#0
}
}

0 comments on commit 0d66300

Please sign in to comment.