[Mlir-commits] [mlir] [mlir][vector] Restrict DropInnerMostUnitDimsTransferWrite (PR #96218)
Andrzej Warzyński
llvmlistbot at llvm.org
Thu Jul 11 02:08:31 PDT 2024
https://github.com/banach-space updated https://github.com/llvm/llvm-project/pull/96218
>From 6f6d0c583ececcf0a17254d0c4f58dc237c2a2ca Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Thu, 20 Jun 2024 14:34:30 +0100
Subject: [PATCH 1/3] [mlir][vector] Restrict
DropInnerMostUnitDimsTransferWrite
Restrict `DropInnerMostUnitDimsTransferWrite` so that it fails when one
of the indices to be dropped could be != 0, e.g.
```mlir
func.func @negative_example(
%arg0: memref<16x1xf32>,
%arg1: vector<8x1xf32>,
%idx_1: index,
%idx_2: index) {
%c0 = arith.constant 0 : index
vector.transfer_write %arg1, %arg0[%idx_1, %idx_2] {in_bounds = [true, true]} : vector<8x1xf32>, memref<16x1xf32>
return
}
```
This is an edge case that could represent an out-of-bounds access,
though that will depend on the actual value of `%idx_2`. Importantly,
_without this change_ it would be transformed as follows:
```mlir
func.func @negative_example(
%arg0: memref<16x1xf32>,
%arg1: vector<8x1xf32>,
%idx_1: index,
%idx_2: index) {
%subview = memref.subview %arg0[0, 0] [16, 1] [1, 1] : memref<16x1xf32> to memref<16xf32, strided<[1]>>
%0 = vector.shape_cast %arg1 : vector<8x1xf32> to vector<8xf32>
vector.transfer_write %0, %subview[%idx_1] {in_bounds = [true]} : vector<8xf32>, memref<16xf32, strided<[1]>>
return
}
```
This is incorrect - `%idx_2` is ignored. Hence the extra restriction to
avoid such cases.
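For contrast, a minimal sketch (the function name is illustrative, not
from the patch) of a case that remains collapsible: the index for the
trailing unit dim is the constant 0, so only the leading index needs to
be carried over to the collapsed write:
```mlir
func.func @example_zero_trailing_idx(
    %arg0: memref<16x1xf32>,
    %arg1: vector<8x1xf32>,
    %idx: index) {
  // The trailing index is the constant 0, so dropping the unit dim cannot
  // change which elements are written.
  %c0 = arith.constant 0 : index
  vector.transfer_write %arg1, %arg0[%idx, %c0] {in_bounds = [true, true]} : vector<8x1xf32>, memref<16x1xf32>
  return
}
```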
NOTE: This PR is limited to `vector.transfer_write`. Similar patch for
`vector.transfer_read`: #94904
---
.../Vector/Transforms/VectorTransforms.cpp | 5 ++++
...tor-transfer-collapse-inner-most-dims.mlir | 27 +++++++++++++++++++
2 files changed, 32 insertions(+)
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index da5954b70a2ec..045bc8142cea0 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -1394,6 +1394,11 @@ class DropInnerMostUnitDimsTransferWrite
if (dimsToDrop == 0)
return failure();
+ // Make sure that the indices to be dropped are equal to 0.
+ // TODO: Deal with cases when the indices are not 0.
+ if (!llvm::all_of(writeOp.getIndices().take_back(dimsToDrop), isZeroIndex))
+ return failure();
+
auto resultTargetVecType =
VectorType::get(targetType.getShape().drop_back(dimsToDrop),
targetType.getElementType(),
diff --git a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
index bd6845d1c7cda..ce66a9f6a5ff0 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
@@ -367,6 +367,33 @@ func.func @contiguous_inner_most_dynamic_outer_scalable_inner_dim(%a: index, %b:
// -----
+func.func @contiguous_inner_most_non_zero_idxs(%arg0: memref<16x1xf32>, %arg1: vector<8x1xf32>, %i: index) {
+ %c0 = arith.constant 0 : index
+ vector.transfer_write %arg1, %arg0[%i, %c0] {in_bounds = [true, true]} : vector<8x1xf32>, memref<16x1xf32>
+ return
+}
+// CHECK-LABEL: func.func @contiguous_inner_most_non_zero_idxs(
+// CHECK-SAME: %[[MEM:.*]]: memref<16x1xf32>,
+// CHECK-SAME: %[[VEC:.*]]: vector<8x1xf32>,
+// CHECK-SAME: %[[IDX:.*]]: index) {
+// CHECK: %[[SV:.*]] = memref.subview %[[MEM]][0, 0] [16, 1] [1, 1] : memref<16x1xf32> to memref<16xf32, strided<[1]>>
+// CHECK: %[[SC:.*]] = vector.shape_cast %[[VEC]] : vector<8x1xf32> to vector<8xf32>
+// CHECK: vector.transfer_write %[[SC]], %[[SV]]{{\[}}%[[IDX]]] {in_bounds = [true]} : vector<8xf32>, memref<16xf32, strided<[1]>>
+
+// The index to be dropped is != 0 - this is currently not supported.
+
+func.func @negative_contiguous_inner_most_dim_non_zero_idxs(%arg0: memref<16x1xf32>, %arg1: vector<8x1xf32>, %i: index) {
+ %c0 = arith.constant 0 : index
+ vector.transfer_write %arg1, %arg0[%i, %i] {in_bounds = [true, true]} : vector<8x1xf32>, memref<16x1xf32>
+ return
+}
+// CHECK-LABEL: func @negative_contiguous_inner_most_dim_non_zero_idxs
+// CHECK-NOT: memref.subview
+// CHECK-NOT: memref.shape_cast
+// CHECK: vector.transfer_write
+
+// -----
+
func.func @drop_inner_most_dim(%arg0: memref<1x512x16x1xf32, strided<[8192, 16, 1, 1], offset: ?>>, %arg1: vector<1x16x16x1xf32>, %arg2: index) {
%c0 = arith.constant 0 : index
vector.transfer_write %arg1, %arg0[%c0, %arg2, %c0, %c0]
>From 72505d0cb6bd67e216b9c78e2605ad86a1c3674d Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Fri, 21 Jun 2024 16:35:59 +0100
Subject: [PATCH 2/3] fixup! [mlir][vector] Restrict
DropInnerMostUnitDimsTransferWrite
Remove duplicate test
---
...tor-transfer-collapse-inner-most-dims.mlir | 31 -------------------
1 file changed, 31 deletions(-)
diff --git a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
index ce66a9f6a5ff0..6fb3cf5ca547d 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
@@ -113,19 +113,6 @@ func.func @contiguous_inner_most_outer_dim_dyn_scalable_inner_dim(%a: index, %b:
// -----
-func.func @contiguous_inner_most_dim_non_zero_idx(%A: memref<16x1xf32>, %i:index) -> (vector<8x1xf32>) {
- %c0 = arith.constant 0 : index
- %f0 = arith.constant 0.0 : f32
- %1 = vector.transfer_read %A[%i, %c0], %f0 : memref<16x1xf32>, vector<8x1xf32>
- return %1 : vector<8x1xf32>
-}
-// CHECK: func @contiguous_inner_most_dim_non_zero_idx(%[[SRC:.+]]: memref<16x1xf32>, %[[I:.+]]: index) -> vector<8x1xf32>
-// CHECK: %[[SRC_0:.+]] = memref.subview %[[SRC]]
-// CHECK-SAME: memref<16x1xf32> to memref<16xf32, strided<[1]>>
-// CHECK: %[[V:.+]] = vector.transfer_read %[[SRC_0]]
-// CHECK: %[[RESULT:.+]] = vector.shape_cast %[[V]] : vector<8xf32> to vector<8x1xf32>
-// CHECK: return %[[RESULT]]
-
// The index to be dropped is != 0 - this is currently not supported.
func.func @negative_contiguous_inner_most_dim_non_zero_idxs(%A: memref<16x1xf32>, %i:index) -> (vector<8x1xf32>) {
%f0 = arith.constant 0.0 : f32
@@ -136,24 +123,6 @@ func.func @negative_contiguous_inner_most_dim_non_zero_idxs(%A: memref<16x1xf32>
// CHECK-NOT: memref.subview
// CHECK: vector.transfer_read
-// Same as the top example within this split, but with the outer vector
-// dim scalable. Note that this example only makes sense when "8 = [8]" (i.e.
-// vscale = 1). This is assumed (implicitly) via the `in_bounds` attribute.
-
-func.func @contiguous_inner_most_dim_non_zero_idx_scalable_inner_dim(%A: memref<16x1xf32>, %i:index) -> (vector<[8]x1xf32>) {
- %c0 = arith.constant 0 : index
- %f0 = arith.constant 0.0 : f32
- %1 = vector.transfer_read %A[%i, %c0], %f0 : memref<16x1xf32>, vector<[8]x1xf32>
- return %1 : vector<[8]x1xf32>
-}
-// CHECK-LABEL: func @contiguous_inner_most_dim_non_zero_idx_scalable_inner_dim(
-// CHECK-SAME: %[[SRC:.+]]: memref<16x1xf32>, %[[I:.+]]: index) -> vector<[8]x1xf32>
-// CHECK: %[[SRC_0:.+]] = memref.subview %[[SRC]]
-// CHECK-SAME: memref<16x1xf32> to memref<16xf32, strided<[1]>>
-// CHECK: %[[V:.+]] = vector.transfer_read %[[SRC_0]]
-// CHECK: %[[RESULT:.+]] = vector.shape_cast %[[V]] : vector<[8]xf32> to vector<[8]x1xf32>
-// CHECK: return %[[RESULT]]
-
// -----
func.func @contiguous_inner_most_dim_with_subview(%A: memref<1000x1xf32>, %i:index, %ii:index) -> (vector<4x1xf32>) {
>From 6a3fe47fbdefcde96d76528530e79774c28534bb Mon Sep 17 00:00:00 2001
From: Andrzej Warzynski <andrzej.warzynski at arm.com>
Date: Thu, 11 Jul 2024 10:03:16 +0100
Subject: [PATCH 3/3] fixup! fixup! [mlir][vector] Restrict
DropInnerMostUnitDimsTransferWrite
Allow non-zero indices when in-bounds
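For reference, a hedged sketch (illustrative name, mirroring the test added
below) of a write that the updated pattern now accepts: the trailing index is
unknown, but its dim is declared in-bounds, which for a unit dim forces that
index to be 0:
```mlir
func.func @example_in_bounds_unknown_idx(
    %arg0: memref<16x1xf32>,
    %arg1: vector<8x1xf32>,
    %i: index) {
  // The trailing dim has size 1 and is marked in-bounds, so %i must be 0
  // there; the pattern can therefore drop the unit dim safely.
  vector.transfer_write %arg1, %arg0[%i, %i] {in_bounds = [true, true]} : vector<8x1xf32>, memref<16x1xf32>
  return
}
```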
---
.../Vector/Transforms/VectorTransforms.cpp | 28 +++++++++++--
...tor-transfer-collapse-inner-most-dims.mlir | 42 ++++++++++++++++---
2 files changed, 61 insertions(+), 9 deletions(-)
diff --git a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
index 045bc8142cea0..5e139648d25e7 100644
--- a/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -1394,9 +1394,31 @@ class DropInnerMostUnitDimsTransferWrite
if (dimsToDrop == 0)
return failure();
- // Make sure that the indices to be dropped are equal to 0.
- // TODO: Deal with cases when the indices are not 0.
- if (!llvm::all_of(writeOp.getIndices().take_back(dimsToDrop), isZeroIndex))
+ // We need to consider 3 cases for the dim to drop:
+ // 1. if "in bounds", it can safely be assumed that the corresponding
+ // index is equal to 0 (safe to collapse) (*)
+ // 2. if "out of bounds" and the corresponding index is 0, it is
+ // effectively "in bounds" (safe to collapse)
+ // 3. if "out of bounds" and the corresponding index is != 0,
+ // be conservative and bail out (not safe to collapse)
+ // (*) This pattern only drops unit dims, so the only possible "in bounds"
+ // index is "0". This could be added as a folder.
+ // TODO: Deal with 3. by e.g. propagating the "out of bounds" flag to other
+ // dims.
+ bool indexOutOfBounds = true;
+ if (writeOp.getInBounds())
+ indexOutOfBounds = llvm::any_of(
+ llvm::zip(writeOp.getInBounds()->getValue().take_back(dimsToDrop),
+ writeOp.getIndices().take_back(dimsToDrop)),
+ [](auto zipped) {
+ auto inBounds = cast<BoolAttr>(std::get<0>(zipped)).getValue();
+ auto nonZeroIdx = !isZeroIndex(std::get<1>(zipped));
+ return !inBounds && nonZeroIdx;
+ });
+ else
+ indexOutOfBounds = !llvm::all_of(
+ writeOp.getIndices().take_back(dimsToDrop), isZeroIndex);
+ if (indexOutOfBounds)
return failure();
auto resultTargetVecType =
diff --git a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
index 6fb3cf5ca547d..2771d05072a12 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-collapse-inner-most-dims.mlir
@@ -336,12 +336,17 @@ func.func @contiguous_inner_most_dynamic_outer_scalable_inner_dim(%a: index, %b:
// -----
-func.func @contiguous_inner_most_non_zero_idxs(%arg0: memref<16x1xf32>, %arg1: vector<8x1xf32>, %i: index) {
+// Test the impact of changing the in_bounds attribute. The behaviour will
+// depend on whether the index is == 0 or != 0.
+
+// The index to be dropped is == 0, so it's safe to collapse. The other index
+// should be preserved correctly.
+func.func @contiguous_inner_most_zero_idx_in_bounds(%arg0: memref<16x1xf32>, %arg1: vector<8x1xf32>, %i: index) {
%c0 = arith.constant 0 : index
vector.transfer_write %arg1, %arg0[%i, %c0] {in_bounds = [true, true]} : vector<8x1xf32>, memref<16x1xf32>
return
}
-// CHECK-LABEL: func.func @contiguous_inner_most_non_zero_idxs(
+// CHECK-LABEL: func.func @contiguous_inner_most_zero_idx_in_bounds(
// CHECK-SAME: %[[MEM:.*]]: memref<16x1xf32>,
// CHECK-SAME: %[[VEC:.*]]: vector<8x1xf32>,
// CHECK-SAME: %[[IDX:.*]]: index) {
@@ -349,14 +354,39 @@ func.func @contiguous_inner_most_non_zero_idxs(%arg0: memref<16x1xf32>, %arg1: v
// CHECK: %[[SC:.*]] = vector.shape_cast %[[VEC]] : vector<8x1xf32> to vector<8xf32>
// CHECK: vector.transfer_write %[[SC]], %[[SV]]{{\[}}%[[IDX]]] {in_bounds = [true]} : vector<8xf32>, memref<16xf32, strided<[1]>>
-// The index to be dropped is != 0 - this is currently not supported.
-
-func.func @negative_contiguous_inner_most_dim_non_zero_idxs(%arg0: memref<16x1xf32>, %arg1: vector<8x1xf32>, %i: index) {
+func.func @contiguous_inner_most_zero_idx_out_of_bounds(%arg0: memref<16x1xf32>, %arg1: vector<8x1xf32>, %i: index) {
%c0 = arith.constant 0 : index
+ vector.transfer_write %arg1, %arg0[%i, %c0] {in_bounds = [true, false]} : vector<8x1xf32>, memref<16x1xf32>
+ return
+}
+// CHECK-LABEL: func.func @contiguous_inner_most_zero_idx_out_of_bounds
+// CHECK-SAME: %[[MEM:.*]]: memref<16x1xf32>,
+// CHECK-SAME: %[[VEC:.*]]: vector<8x1xf32>,
+// CHECK-SAME: %[[IDX:.*]]: index) {
+// CHECK: %[[SV:.*]] = memref.subview %[[MEM]][0, 0] [16, 1] [1, 1] : memref<16x1xf32> to memref<16xf32, strided<[1]>>
+// CHECK: %[[SC:.*]] = vector.shape_cast %[[VEC]] : vector<8x1xf32> to vector<8xf32>
+// CHECK: vector.transfer_write %[[SC]], %[[SV]]{{\[}}%[[IDX]]] {in_bounds = [true]} : vector<8xf32>, memref<16xf32, strided<[1]>>
+
+// The index to be dropped is unknown, but since it's "in bounds", it has to be
+// == 0. It's safe to collapse the corresponding dim.
+
+func.func @contiguous_inner_most_dim_non_zero_idx_in_bounds(%arg0: memref<16x1xf32>, %arg1: vector<8x1xf32>, %i: index) {
vector.transfer_write %arg1, %arg0[%i, %i] {in_bounds = [true, true]} : vector<8x1xf32>, memref<16x1xf32>
return
}
-// CHECK-LABEL: func @negative_contiguous_inner_most_dim_non_zero_idxs
+// CHECK-LABEL: func @contiguous_inner_most_dim_non_zero_idx_in_bounds
+// CHECK-SAME: %[[MEM:.*]]: memref<16x1xf32>,
+// CHECK-SAME: %[[VEC:.*]]: vector<8x1xf32>,
+// CHECK-SAME: %[[IDX:.*]]: index) {
+// CHECK: %[[SV:.*]] = memref.subview %[[MEM]][0, 0] [16, 1] [1, 1] : memref<16x1xf32> to memref<16xf32, strided<[1]>>
+// CHECK: %[[SC:.*]] = vector.shape_cast %[[VEC]] : vector<8x1xf32> to vector<8xf32>
+// CHECK: vector.transfer_write %[[SC]], %[[SV]]{{\[}}%[[IDX]]] {in_bounds = [true]} : vector<8xf32>, memref<16xf32, strided<[1]>>
+
+func.func @negative_contiguous_inner_most_dim_non_zero_idx_out_of_bounds(%arg0: memref<16x1xf32>, %arg1: vector<8x1xf32>, %i: index) {
+ vector.transfer_write %arg1, %arg0[%i, %i] {in_bounds = [true, false]} : vector<8x1xf32>, memref<16x1xf32>
+ return
+}
+// CHECK-LABEL: func @negative_contiguous_inner_most_dim_non_zero_idx_out_of_bounds
// CHECK-NOT: memref.subview
// CHECK-NOT: memref.shape_cast
// CHECK: vector.transfer_write
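For completeness, a hedged sketch of how such cases can be exercised locally.
The pass flag follows the test file's existing RUN-line convention and is an
assumption here, not something shown in this diff:
```mlir
// Assumed invocation (flag name inferred from the test file's RUN line):
// RUN: mlir-opt %s -test-vector-transfer-collapse-inner-most-dims -split-input-file | FileCheck %s

func.func @example_collapse(%mem: memref<16x1xf32>, %vec: vector<8x1xf32>, %idx: index) {
  %c0 = arith.constant 0 : index
  // Expected after the pattern: a rank-reduced subview, a shape_cast to
  // vector<8xf32>, and a 1-D transfer_write indexed only by %idx.
  vector.transfer_write %vec, %mem[%idx, %c0] {in_bounds = [true, true]} : vector<8x1xf32>, memref<16x1xf32>
  return
}
```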