[Mlir-commits] [mlir] [mlir][nfc] Update 2 tests for PadOpVectorizationWithTransferWritePattern (PR #122721)
Andrzej Warzyński
llvmlistbot at llvm.org
Mon Jan 13 07:00:11 PST 2025
https://github.com/banach-space created https://github.com/llvm/llvm-project/pull/122721
* Relocates two tests for `PadOpVectorizationWithTransferWritePattern`
in "vectorization-pad-patterns.mlir" to group them with other tests
for the same pattern.
* Adds a note clarifying that these are negative tests and explains the
reasoning behind them.
* Removes `transform.apply_patterns.linalg.decompose_pad` from the TD
sequences as it's no longer needed (*).
This is essentially a small clean-up in preparation for upcoming changes.
(*) `transform.apply_patterns.linalg.decompose_pad` was split off from
`transform.apply_patterns.linalg.pad_vectorization` in #117329.
"vectorization-pad-patterns.mlir" is meant to test the latter, not the
former.
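For context, the net effect on the TD sequences is sketched below (taken from the diff that follows; the "Before"/"After" labels are illustrative and not part of the test file):

    // Before: both pattern sets were applied in the same sequence.
    transform.apply_patterns to %func_op {
      transform.apply_patterns.linalg.decompose_pad
      transform.apply_patterns.linalg.pad_vectorization
    } : !transform.op<"func.func">

    // After: only the patterns this file is meant to exercise.
    transform.apply_patterns to %func_op {
      transform.apply_patterns.linalg.pad_vectorization
    } : !transform.op<"func.func">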
From e638f5bf0ecfe49e3b8d649e7ea039371fbb686c Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Mon, 13 Jan 2025 14:52:29 +0000
Subject: [PATCH] [mlir][nfc] Update 2 tests for
PadOpVectorizationWithTransferWritePattern
* Relocates two tests for `PadOpVectorizationWithTransferWritePattern`
in "vectorization-pad-patterns.mlir" to group them with other tests
for the same pattern.
* Adds a note clarifying that these are negative tests and explains the
reasoning behind them.
* Removes `transform.apply_patterns.linalg.decompose_pad` from the TD
sequences as it's no longer needed (*).
This is essentially a small clean-up in preparation for upcoming changes.
(*) `transform.apply_patterns.linalg.decompose_pad` was split off from
`transform.apply_patterns.linalg.pad_vectorization` in #117329.
"vectorization-pad-patterns.mlir" is meant to test the latter, not the
former.
---
.../Linalg/vectorization-pad-patterns.mlir | 140 +++++++++---------
1 file changed, 68 insertions(+), 72 deletions(-)
diff --git a/mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir
index 41e480648177f5..08a3bbbb301c87 100644
--- a/mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir
@@ -114,6 +114,74 @@ module attributes {transform.with_named_sequence} {
}
}
+// -----
+
+func.func private @make_vector() -> vector<7x9xf32>
+
+// Negative test - low pad is non-zero
+
+// CHECK-LABEL: func @pad_and_transfer_write_static_non_zero_low_pad
+// CHECK: tensor.pad
+func.func @pad_and_transfer_write_static_non_zero_low_pad(
+ %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
+ %c0 = arith.constant 0 : index
+ %c5 = arith.constant 5.0 : f32
+ %0 = tensor.pad %arg0 low[0, 1] high[5, 6] {
+ ^bb0(%arg2: index, %arg3: index):
+ tensor.yield %c5 : f32
+ } : tensor<5x6xf32> to tensor<10x13xf32>
+ %1 = call @make_vector() : () -> vector<7x9xf32>
+ %2 = vector.transfer_write %1, %0[%c0, %c0]
+ : vector<7x9xf32>, tensor<10x13xf32>
+ %3 = tensor.extract_slice %2[0, 0] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32>
+ return %3 : tensor<5x6xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %func_op = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.op<"func.func">
+
+ transform.apply_patterns to %func_op {
+ transform.apply_patterns.linalg.pad_vectorization
+ } : !transform.op<"func.func">
+ transform.yield
+ }
+}
+
+// -----
+
+// Negative test - TransferWriteOp result is not _directly_ consumed by an
+// ExtractSliceOp (note the non-zero offset).
+
+func.func private @make_vector() -> vector<7x9xf32>
+
+// CHECK-LABEL: func @pad_and_transfer_write_static_non_zero_offset
+// CHECK: tensor.pad
+func.func @pad_and_transfer_write_static_non_zero_offset(
+ %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
+ %c0 = arith.constant 0 : index
+ %c5 = arith.constant 5.0 : f32
+ %0 = tensor.pad %arg0 low[0, 0] high[5, 7] {
+ ^bb0(%arg2: index, %arg3: index):
+ tensor.yield %c5 : f32
+ } : tensor<5x6xf32> to tensor<10x13xf32>
+ %1 = call @make_vector() : () -> vector<7x9xf32>
+ %2 = vector.transfer_write %1, %0[%c0, %c0]
+ : vector<7x9xf32>, tensor<10x13xf32>
+ %3 = tensor.extract_slice %2[0, 1] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32>
+ return %3 : tensor<5x6xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %func_op = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.op<"func.func">
+
+ transform.apply_patterns to %func_op {
+ transform.apply_patterns.linalg.pad_vectorization
+ } : !transform.op<"func.func">
+ transform.yield
+ }
+}
// -----
@@ -209,75 +277,3 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
-
-// -----
-func.func private @make_vector() -> vector<7x9xf32>
-
-// Variant of @pad_and_transfer_write_static
-
-// CHECK-LABEL: func @pad_and_transfer_write_static_non_zero_low_pad
-// CHECK-NOT: tensor.pad
-// CHECK: linalg.fill
-func.func @pad_and_transfer_write_static_non_zero_low_pad(
- %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
- %c0 = arith.constant 0 : index
- %c5 = arith.constant 5.0 : f32
- %0 = tensor.pad %arg0 low[0, 1] high[5, 6] {
- ^bb0(%arg2: index, %arg3: index):
- tensor.yield %c5 : f32
- } : tensor<5x6xf32> to tensor<10x13xf32>
- %1 = call @make_vector() : () -> vector<7x9xf32>
- %2 = vector.transfer_write %1, %0[%c0, %c0]
- : vector<7x9xf32>, tensor<10x13xf32>
- %3 = tensor.extract_slice %2[0, 0] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32>
- return %3 : tensor<5x6xf32>
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %func_op = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.op<"func.func">
-
- transform.apply_patterns to %func_op {
- // TODO: Split into two tests, one for each pattern
- transform.apply_patterns.linalg.decompose_pad
- transform.apply_patterns.linalg.pad_vectorization
- } : !transform.op<"func.func">
- transform.yield
- }
-}
-
-// -----
-func.func private @make_vector() -> vector<7x9xf32>
-
-// Variant of @pad_and_transfer_write_static
-
-// CHECK-LABEL: func @pad_and_transfer_write_static_non_zero_offset
-// CHECK-NOT: tensor.pad
-// CHECK: linalg.fill
-func.func @pad_and_transfer_write_static_non_zero_offset(
- %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
- %c0 = arith.constant 0 : index
- %c5 = arith.constant 5.0 : f32
- %0 = tensor.pad %arg0 low[0, 1] high[5, 6] {
- ^bb0(%arg2: index, %arg3: index):
- tensor.yield %c5 : f32
- } : tensor<5x6xf32> to tensor<10x13xf32>
- %1 = call @make_vector() : () -> vector<7x9xf32>
- %2 = vector.transfer_write %1, %0[%c0, %c0]
- : vector<7x9xf32>, tensor<10x13xf32>
- %3 = tensor.extract_slice %2[0, 1] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32>
- return %3 : tensor<5x6xf32>
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %func_op = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.op<"func.func">
-
- transform.apply_patterns to %func_op {
- // TODO: Split into two tests, one for each pattern
- transform.apply_patterns.linalg.decompose_pad
- transform.apply_patterns.linalg.pad_vectorization
- } : !transform.op<"func.func">
- transform.yield
- }
-}