[Mlir-commits] [mlir] 2bc8ffa - [mlir] Support permutation maps in vector transfer op folder
Matthias Springer
llvmlistbot at llvm.org
Mon May 31 01:30:43 PDT 2021
Author: Matthias Springer
Date: 2021-05-31T17:22:46+09:00
New Revision: 2bc8ffa8afabeaf1cc97640419c1c2a926265170
URL: https://github.com/llvm/llvm-project/commit/2bc8ffa8afabeaf1cc97640419c1c2a926265170
DIFF: https://github.com/llvm/llvm-project/commit/2bc8ffa8afabeaf1cc97640419c1c2a926265170.diff
LOG: [mlir] Support permutation maps in vector transfer op folder
Fold away the in_bounds attribute even if the transfer op has a non-identity permutation map.
Differential Revision: https://reviews.llvm.org/D103133
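For illustration (not part of the patch), a minimal before/after sketch of what the folder can now handle, assuming hypothetical values %t and %cf, a constant-zero index %c0, and a source whose static sizes match the vector shape through a transposing permutation map:

    // Before folding: no in_bounds attribute, dimensions conservatively out-of-bounds.
    %v = vector.transfer_read %t[%c0, %c0], %cf
        {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
        : tensor<4x8xf32>, vector<8x4xf32>

    // After folding: both vector dimensions are statically known to be in-bounds.
    %v = vector.transfer_read %t[%c0, %c0], %cf
        {in_bounds = [true, true],
         permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
        : tensor<4x8xf32>, vector<8x4xf32>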
Added:

Modified:
    mlir/lib/Dialect/Vector/VectorOps.cpp
    mlir/test/Dialect/Linalg/vectorization.mlir

Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Vector/VectorOps.cpp b/mlir/lib/Dialect/Vector/VectorOps.cpp
index 66a14a16cae75..92a672050a19e 100644
--- a/mlir/lib/Dialect/Vector/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/VectorOps.cpp
@@ -2630,24 +2630,25 @@ static bool isInBounds(TransferOp op, int64_t resultIdx, int64_t indicesIdx) {
 template <typename TransferOp>
 static LogicalResult foldTransferInBoundsAttribute(TransferOp op) {
   AffineMap permutationMap = op.permutation_map();
-  if (!permutationMap.isMinorIdentity())
-    return failure();
   bool changed = false;
   SmallVector<bool, 4> newInBounds;
   newInBounds.reserve(op.getTransferRank());
-  op.zipResultAndIndexing([&](int64_t resultIdx, int64_t indicesIdx) {
+  for (unsigned i = 0; i < op.getTransferRank(); ++i) {
     // Already marked as in-bounds, nothing to see here.
-    if (op.isDimInBounds(resultIdx)) {
+    if (op.isDimInBounds(i)) {
       newInBounds.push_back(true);
-      return;
+      continue;
     }
     // Currently out-of-bounds, check whether we can statically determine it is
     // inBounds.
-    auto inBounds = isInBounds(op, resultIdx, indicesIdx);
+    auto dimExpr = permutationMap.getResult(i).dyn_cast<AffineDimExpr>();
+    assert(dimExpr && "Broadcast dims must be in-bounds");
+    auto inBounds = isInBounds(
+        op, /*resultIdx=*/i, /*indicesIdx=*/dimExpr.getPosition());
     newInBounds.push_back(inBounds);
     // We commit the pattern if it is "more inbounds".
     changed |= inBounds;
-  });
+  }
   if (!changed)
     return failure();
   // OpBuilder is only used as a helper to build an I64ArrayAttr.
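For illustration (not part of the patch), the rewritten loop resolves each vector result dimension through the permutation map to the source index it is checked against. Using #[[MAP2]] from the test below, affine_map<(d0, d1, d2, d3) -> (d2, d1, d3, d0)>:

    resultIdx   permutationMap.getResult(i)   indicesIdx
    0           d2                            2
    1           d1                            1
    2           d3                            3
    3           d0                            0

A broadcast result, such as the constant-0 entries of #[[MAP1]] = affine_map<(d0, d1) -> (0, d1, 0, d0)>, is not an AffineDimExpr. Such dimensions must already be marked in-bounds, so they take the isDimInBounds early-continue path and never reach the dyn_cast; the new assert documents that invariant.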
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index f4caad70ae197..236bf3b335472 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -388,10 +388,10 @@ func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
// CHECK: func @generic_vectorize_broadcast_transpose
// CHECK-DAG: %[[C0:.*]] = constant 0 : index
// CHECK-DAG: %[[CF:.*]] = constant 0.000000e+00 : f32
-// CHECK: %[[V0:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {permutation_map = #[[$MAP0]]} : memref<4x4xf32>, vector<4x4x4x4xf32>
-// CHECK: %[[V1:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {permutation_map = #[[$MAP1]]} : memref<4xf32>, vector<4x4x4x4xf32>
-// CHECK: %[[V2:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {permutation_map = #[[$MAP2]]} : memref<4xf32>, vector<4x4x4x4xf32>
-// CHECK: %[[V3:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {permutation_map = #[[$MAP3]]} : memref<4x4xf32>, vector<4x4x4x4xf32>
+// CHECK: %[[V0:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP0]]} : memref<4x4xf32>, vector<4x4x4x4xf32>
+// CHECK: %[[V1:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP1]]} : memref<4xf32>, vector<4x4x4x4xf32>
+// CHECK: %[[V2:.*]] = vector.transfer_read %{{.*}}[%[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP2]]} : memref<4xf32>, vector<4x4x4x4xf32>
+// CHECK: %[[V3:.*]] = vector.transfer_read %{{.*}}[%[[C0]], %[[C0]]], %[[CF]] {in_bounds = [true, true, true, true], permutation_map = #[[$MAP3]]} : memref<4x4xf32>, vector<4x4x4x4xf32>
// CHECK: %[[SUB:.*]] = subf %[[V0]], %[[V1]] : vector<4x4x4x4xf32>
// CHECK: %[[ADD0:.*]] = addf %[[V2]], %[[SUB]] : vector<4x4x4x4xf32>
// CHECK: %[[ADD1:.*]] = addf %[[V3]], %[[ADD0]] : vector<4x4x4x4xf32>
@@ -433,9 +433,9 @@ func @generic_vectorize_broadcast_transpose(
// CHECK-DAG: #[[MAP1:.*]] = affine_map<(d0, d1) -> (0, d1, 0, d0)>
// CHECK-DAG: #[[MAP2:.*]] = affine_map<(d0, d1, d2, d3) -> (d2, d1, d3, d0)>
// CHECK: func @vectorization_transpose
-// CHECK: vector.transfer_read {{.*}}{permutation_map = #[[MAP0]]} : memref<14x7xf32>, vector<7x14x8x16xf32>
-// CHECK: vector.transfer_read {{.*}}{permutation_map = #[[MAP1]]} : memref<16x14xf32>, vector<7x14x8x16xf32>
-// CHECK: vector.transfer_read {{.*}}{permutation_map = #[[MAP2]]} : memref<16x14x7x8xf32>, vector<7x14x8x16xf32>
+// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP0]]} : memref<14x7xf32>, vector<7x14x8x16xf32>
+// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP1]]} : memref<16x14xf32>, vector<7x14x8x16xf32>
+// CHECK: vector.transfer_read {{.*}}{in_bounds = [true, true, true, true], permutation_map = #[[MAP2]]} : memref<16x14x7x8xf32>, vector<7x14x8x16xf32>
// CHECK: addf {{.*}} : vector<7x14x8x16xf32>
// CHECK: addf {{.*}} : vector<7x14x8x16xf32>
// CHECK: vector.transfer_write {{.*}} : vector<7x14x8x16xf32>, memref<7x14x8x16xf32>
@@ -565,7 +565,7 @@ func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
-> tensor<4x16xf32>
{
// CHECK: vector.transfer_read {{.*}} : tensor<4x16x8xf32>, vector<4x16x8xf32>
- // CHECK: vector.transfer_read {{.*}} {permutation_map = #[[$M0]]} : tensor<4x16xf32>, vector<4x16x8xf32>
+ // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true], permutation_map = #[[$M0]]} : tensor<4x16xf32>, vector<4x16x8xf32>
// CHECK: math.exp {{.*}} : vector<4x16x8xf32>
// CHECK: addf {{.*}} : vector<4x16x8xf32>
// CHECK: vector.multi_reduction #vector.kind<add>, %{{.*}} [2] : vector<4x16x8xf32> to vector<4x16xf32>
@@ -597,15 +597,15 @@ func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output: tensor<5x2xf32>)
-> tensor<5x2xf32>
{
- // CHECK: vector.transfer_read {{.*}} {permutation_map = #[[$M1]]} : tensor<3x2xf32>, vector<2x3x4x5xf32>
- // CHECK: vector.transfer_read {{.*}} {permutation_map = #[[$M2]]} : tensor<5x4xf32>, vector<2x3x4x5xf32>
- // CHECK: vector.transfer_read {{.*}} {permutation_map = #[[$M3]]} : tensor<5x2xf32>, vector<2x3x4x5xf32>
+ // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M1]]} : tensor<3x2xf32>, vector<2x3x4x5xf32>
+ // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M2]]} : tensor<5x4xf32>, vector<2x3x4x5xf32>
+ // CHECK: vector.transfer_read {{.*}} {in_bounds = [true, true, true, true], permutation_map = #[[$M3]]} : tensor<5x2xf32>, vector<2x3x4x5xf32>
// CHECK: math.exp {{.*}} : vector<2x3x4x5xf32>
// CHECK: math.exp {{.*}} : vector<2x3x4x5xf32>
// CHECK: addf {{.*}} : vector<2x3x4x5xf32>
// CHECK: addf {{.*}} : vector<2x3x4x5xf32>
// CHECK: vector.multi_reduction #vector.kind<add>, {{.*}} [1, 2] : vector<2x3x4x5xf32> to vector<2x5xf32>
- // CHECK: vector.transfer_write {{.*}} {permutation_map = #[[$M4]]} : vector<2x5xf32>, tensor<5x2xf32>
+ // CHECK: vector.transfer_write {{.*}} {in_bounds = [true, true], permutation_map = #[[$M4]]} : vector<2x5xf32>, tensor<5x2xf32>
// CHECK: return {{.*}} : tensor<5x2xf32>
%0 = linalg.generic {
indexing_maps = [