[Mlir-commits] [mlir] f397bdf - [mlir][tensor] Fold consumer linalg transpose with producer tensor pack (#74206)

llvmlistbot at llvm.org
Wed Dec 13 14:26:22 PST 2023


Author: Prathamesh Tagore
Date: 2023-12-13T14:26:19-08:00
New Revision: f397bdf5aee331d984d5e41ed39a6834ec9fe0c5

URL: https://github.com/llvm/llvm-project/commit/f397bdf5aee331d984d5e41ed39a6834ec9fe0c5
DIFF: https://github.com/llvm/llvm-project/commit/f397bdf5aee331d984d5e41ed39a6834ec9fe0c5.diff

LOG: [mlir][tensor] Fold consumer linalg transpose with producer tensor pack (#74206)

Partial fix for https://github.com/openxla/iree/issues/15367
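
For illustration, a minimal sketch of the rewrite this pattern performs, using the shapes from the first test case added below (the value names %empty0, %empty1, and %new_empty are placeholders for the tensor.empty destinations):

  // Before: a tensor.pack whose result is only permuted by a linalg.transpose.
  %pack = tensor.pack %arg0 outer_dims_perm = [0, 3, 2, 1]
      inner_dims_pos = [3] inner_tiles = [32]
      into %empty0 : tensor<56x57x1x64xf32> -> tensor<56x2x1x57x32xf32>
  %transposed = linalg.transpose ins(%pack : tensor<56x2x1x57x32xf32>)
      outs(%empty1 : tensor<1x57x56x2x32xf32>)
      permutation = [2, 3, 0, 1, 4]

  // After: the permutation is folded into the pack's outer_dims_perm and the
  // transpose disappears.
  %pack = tensor.pack %arg0 outer_dims_perm = [2, 1, 0, 3]
      inner_dims_pos = [3] inner_tiles = [32]
      into %new_empty : tensor<56x57x1x64xf32> -> tensor<1x57x56x2x32xf32>

As the tensor_pack_linalg_transpose_fold_tile_dims_outer_dims_transpose test below shows, the fold is skipped when the pack has an outer_dims_perm and the transpose mixes a tile dimension with a non-tile dimension.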

Added: 
    

Modified: 
    mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
    mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir

Removed: 
    


################################################################################
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
index 9eac3e5c7ef910..e4509b331beeac 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
@@ -6,6 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "mlir/Dialect/Linalg/IR/Linalg.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Tensor/Transforms/Transforms.h"
 #include "mlir/IR/PatternMatch.h"
@@ -81,10 +82,71 @@ struct FoldUnpackWithExtractSliceOp : public OpRewritePattern<ExtractSliceOp> {
     return success();
   }
 };
+
+/// Fold 'pack' -> 'transpose' into 'pack' since 'pack' already has transpose
+/// semantics.
+struct FoldProducerPackWithConsumerLinalgTransposeOp
+    : public OpRewritePattern<linalg::TransposeOp> {
+  using OpRewritePattern<linalg::TransposeOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(linalg::TransposeOp transposeOp,
+                                PatternRewriter &rewriter) const override {
+    auto packOp = transposeOp.getOperand(0).getDefiningOp<PackOp>();
+
+    if (!packOp)
+      return failure();
+
+    auto innerDimsPos = packOp.getInnerDimsPos();
+    auto mixedInnerTiles = packOp.getMixedTiles();
+    auto outerDimsPerm = packOp.getOuterDimsPerm();
+    auto transposePerm = transposeOp.getPermutation();
+    SmallVector<int64_t> newOuterDimsPermVec;
+    SmallVector<int64_t> newInnerDimsPosVec;
+    SmallVector<OpFoldResult> newMixedInnerTilesVec;
+    int64_t srcRank = packOp.getSourceRank();
+
+    // Process transpose operation for non-tiled outer dimensions
+    for (unsigned int i = 0; i < srcRank; ++i) {
+      int64_t remappedPosition = transposePerm[i];
+
+      // If tensor.pack has outer_dims_perm attribute, then consider it during
+      // index remapping.
+      if (!outerDimsPerm.empty()) {
+        if (transposePerm[i] >= srcRank) {
+          return rewriter.notifyMatchFailure(
+              transposeOp,
+              "Cannot fold in tensor.pack if a tile dimension was transposed "
+              "with a non-tile dimension in linalg.transpose.");
+        }
+        remappedPosition = outerDimsPerm[remappedPosition];
+      }
+
+      newOuterDimsPermVec.push_back(remappedPosition);
+    }
+
+    // Process transpose operation for tiled inner dimensions
+    for (unsigned int i = srcRank; i < transposePerm.size(); ++i) {
+      int64_t remappedPosition = transposePerm[i] - srcRank;
+      newMixedInnerTilesVec.push_back(mixedInnerTiles[remappedPosition]);
+      newInnerDimsPosVec.push_back(innerDimsPos[remappedPosition]);
+    }
+
+    Value output = packOp.createDestinationTensor(
+        rewriter, transposeOp.getLoc(), packOp.getSource(),
+        newMixedInnerTilesVec, newInnerDimsPosVec, newOuterDimsPermVec);
+
+    rewriter.replaceOpWithNewOp<PackOp>(
+        transposeOp, packOp.getSource(), output, newInnerDimsPosVec,
+        newMixedInnerTilesVec, packOp.getPaddingValue(), newOuterDimsPermVec);
+
+    return success();
+  }
+};
 } // namespace
 
 void populateFoldIntoPackAndUnpackPatterns(RewritePatternSet &patterns) {
-  patterns.insert<FoldUnpackWithExtractSliceOp, FoldPadWithPackOp>(
+  patterns.insert<FoldUnpackWithExtractSliceOp, FoldPadWithPackOp,
+                  FoldProducerPackWithConsumerLinalgTransposeOp>(
       patterns.getContext());
 }
 

diff --git a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
index 5c757896657427..ca4eb4ff679445 100644
--- a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
@@ -114,3 +114,234 @@ func.func @pad_pack_different_padding_value(%src: tensor<16641x16xf32>) -> tenso
 // CHECK-LABEL: func.func @pad_pack_different_padding_value
 // CHECK:         tensor.pad
 // CHECK:         tensor.pack
+
+// -----
+
+func.func @tensor_pack_linalg_transpose_fold(%arg0: tensor<56x57x1x64xf32>) -> tensor<1x57x56x2x32xf32> {
+  %0 = tensor.empty() : tensor<56x2x1x57x32xf32>
+  %pack = tensor.pack %arg0
+    outer_dims_perm = [0, 3, 2, 1]
+    inner_dims_pos = [3]
+    inner_tiles = [32]
+    into %0 : tensor<56x57x1x64xf32> -> tensor<56x2x1x57x32xf32>
+
+  %1 = tensor.empty() : tensor<1x57x56x2x32xf32>
+  %transposed = linalg.transpose
+    ins(%pack : tensor<56x2x1x57x32xf32>)
+    outs(%1 : tensor<1x57x56x2x32xf32>)
+    permutation = [2, 3, 0, 1, 4]
+  return %transposed : tensor<1x57x56x2x32xf32>
+}
+//      CHECK: func @tensor_pack_linalg_transpose_fold(
+// CHECK-SAME:     %[[ARG0:.+]]: tensor<56x57x1x64xf32>)
+//      CHECK:   %[[INIT:.+]] = tensor.empty() : tensor<1x57x56x2x32xf32>
+//      CHECK:   %[[PACK:.+]] = tensor.pack %[[ARG0]]
+// CHECK-SAME:      outer_dims_perm = [2, 1, 0, 3]
+// CHECK-SAME:      inner_dims_pos = [3] inner_tiles = [32] 
+// CHECK-SAME:       into %[[INIT]]
+//      CHECK:   return %[[PACK]]
+
+// -----
+
+func.func @tensor_pack_linalg_transpose_fold_with_padding(%arg0: tensor<56x57x1x55xf32>, %padding: f32) -> tensor<1x57x56x2x32xf32> {
+  %0 = tensor.empty() : tensor<56x2x1x57x32xf32>
+  %pack = tensor.pack %arg0 padding_value(%padding : f32)
+    outer_dims_perm = [0, 3, 2, 1]
+    inner_dims_pos = [3]
+    inner_tiles = [32]
+    into %0 : tensor<56x57x1x55xf32> -> tensor<56x2x1x57x32xf32>
+
+  %1 = tensor.empty() : tensor<1x57x56x2x32xf32>
+  %transposed = linalg.transpose
+    ins(%pack : tensor<56x2x1x57x32xf32>)
+    outs(%1 : tensor<1x57x56x2x32xf32>)
+    permutation = [2, 3, 0, 1, 4]
+  return %transposed : tensor<1x57x56x2x32xf32>
+}
+//      CHECK: func @tensor_pack_linalg_transpose_fold_with_padding(
+// CHECK-SAME:     %[[ARG0:.+]]: tensor<56x57x1x55xf32>, %[[PADDING:.+]]: f32)
+//      CHECK:   %[[INIT:.+]] = tensor.empty() : tensor<1x57x56x2x32xf32>
+//      CHECK:   %[[PACK:.+]] = tensor.pack %[[ARG0]] padding_value(%[[PADDING]] : f32)
+// CHECK-SAME:      outer_dims_perm = [2, 1, 0, 3]
+// CHECK-SAME:      inner_dims_pos = [3] inner_tiles = [32] 
+// CHECK-SAME:       into %[[INIT]]
+//      CHECK:   return %[[PACK]]
+
+// -----
+
+func.func @tensor_pack_linalg_transpose_fold_no_outer_dims_perm(%arg0: tensor<56x57x1x64xf32>) -> tensor<1x2x56x57x32xf32> {
+  %0 = tensor.empty() : tensor<56x57x1x2x32xf32>
+  %pack = tensor.pack %arg0
+    inner_dims_pos = [3]
+    inner_tiles = [32]
+    into %0 : tensor<56x57x1x64xf32> -> tensor<56x57x1x2x32xf32>
+
+  %1 = tensor.empty() : tensor<1x2x56x57x32xf32>
+  %transposed = linalg.transpose
+    ins(%pack : tensor<56x57x1x2x32xf32>)
+    outs(%1 : tensor<1x2x56x57x32xf32>)
+    permutation = [2, 3, 0, 1, 4]
+  return %transposed : tensor<1x2x56x57x32xf32>
+}
+//      CHECK: func @tensor_pack_linalg_transpose_fold_no_outer_dims_perm(
+// CHECK-SAME:     %[[ARG0:.+]]: tensor<56x57x1x64xf32>)
+//      CHECK:   %[[INIT:.+]] = tensor.empty() : tensor<1x2x56x57x32xf32>
+//      CHECK:   %[[PACK:.+]] = tensor.pack %[[ARG0]]
+// CHECK-SAME:      outer_dims_perm = [2, 3, 0, 1]
+// CHECK-SAME:      inner_dims_pos = [3] inner_tiles = [32] 
+// CHECK-SAME:       into %[[INIT]]
+//      CHECK:   return %[[PACK]]
+
+// -----
+
+func.func @tensor_pack_linalg_transpose_fold_tile_dims_transpose(%arg0: tensor<56x72x24x128xf32>) -> tensor<12x56x4x9x32x8x2xf32> {
+  %0 = tensor.empty() : tensor<4x9x12x56x8x2x32xf32>
+  %pack = tensor.pack %arg0
+    outer_dims_perm = [3, 1, 2, 0]
+    inner_dims_pos = [1, 2, 3]
+    inner_tiles = [8, 2, 32]
+    into %0 : tensor<56x72x24x128xf32> -> tensor<4x9x12x56x8x2x32xf32>
+
+  %1 = tensor.empty() : tensor<12x56x4x9x32x8x2xf32>
+  %transposed = linalg.transpose
+    ins(%pack : tensor<4x9x12x56x8x2x32xf32>)
+    outs(%1 : tensor<12x56x4x9x32x8x2xf32>)
+    permutation = [2, 3, 0, 1, 6, 4, 5]
+  return %transposed : tensor<12x56x4x9x32x8x2xf32>
+}
+//      CHECK: func @tensor_pack_linalg_transpose_fold_tile_dims_transpose(
+// CHECK-SAME:     %[[ARG0:.+]]: tensor<56x72x24x128xf32>)
+//      CHECK:   %[[INIT:.+]] = tensor.empty() : tensor<12x56x4x9x32x8x2xf32>
+//      CHECK:   %[[PACK:.+]] = tensor.pack %[[ARG0]]
+// CHECK-SAME:      outer_dims_perm = [2, 0, 3, 1]
+// CHECK-SAME:      inner_dims_pos = [3, 1, 2] inner_tiles = [32, 8, 2] 
+// CHECK-SAME:       into %[[INIT]]
+//      CHECK:   return %[[PACK]]
+
+// -----
+
+func.func @tensor_pack_linalg_transpose_fold_tile_dims_outer_dims_transpose(%arg0: tensor<56x72x24x128xf32>) -> tensor<9x56x2x12x32x8x4xf32> {
+  %0 = tensor.empty() : tensor<4x12x9x56x8x2x32xf32>
+  %pack = tensor.pack %arg0
+    outer_dims_perm = [3, 2, 1, 0]
+    inner_dims_pos = [1, 2, 3]
+    inner_tiles = [8, 2, 32]
+    into %0 : tensor<56x72x24x128xf32> -> tensor<4x12x9x56x8x2x32xf32>
+
+  %1 = tensor.empty() : tensor<9x56x2x12x32x8x4xf32>
+  %transposed = linalg.transpose
+    ins(%pack : tensor<4x12x9x56x8x2x32xf32>)
+    outs(%1 : tensor<9x56x2x12x32x8x4xf32>)
+    permutation = [2, 3, 5, 1, 6, 4, 0]
+  return %transposed : tensor<9x56x2x12x32x8x4xf32>
+}
+//      CHECK: func @tensor_pack_linalg_transpose_fold_tile_dims_outer_dims_transpose(
+// CHECK-SAME:     %[[ARG0:.+]]: tensor<56x72x24x128xf32>)
+//      CHECK:   tensor.pack
+//      CHECK:   linalg.transpose
+
+// -----
+
+func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_dims(%arg0: tensor<56x?x?x64xf32>) -> tensor<?x?x56x2x32xf32> {
+  %0 = tensor.empty() : tensor<56x2x1x57x32xf32>
+  %pack = tensor.pack %arg0
+    outer_dims_perm = [0, 3, 2, 1]
+    inner_dims_pos = [3]
+    inner_tiles = [32]
+    into %0 : tensor<56x?x?x64xf32> -> tensor<56x2x1x57x32xf32>
+
+  %1 = tensor.empty() : tensor<1x57x56x2x32xf32>
+  %transposed = linalg.transpose
+    ins(%pack : tensor<56x2x1x57x32xf32>)
+    outs(%1 : tensor<1x57x56x2x32xf32>)
+    permutation = [2, 3, 0, 1, 4]
+
+  %return_value = tensor.cast %transposed : tensor<1x57x56x2x32xf32> to tensor<?x?x56x2x32xf32>  
+  return %return_value : tensor<?x?x56x2x32xf32>
+}
+//      CHECK: func @tensor_pack_linalg_transpose_fold_dynamic_outer_dims(
+// CHECK-SAME:     %[[ARG0:.+]]: tensor<56x?x?x64xf32>)
+//      CHECK:   %[[c1:.+]] = arith.constant 1 : index
+//      CHECK:   %[[c2:.+]] = arith.constant 2 : index
+//      CHECK:   %[[dim:.+]] = tensor.dim %[[ARG0]], %[[c1]] : tensor<56x?x?x64xf32>
+//      CHECK:   %[[dim_0:.+]] = tensor.dim %[[ARG0]], %[[c2]] : tensor<56x?x?x64xf32>
+//      CHECK:   %[[INIT:.+]] = tensor.empty(%[[dim_0]], %[[dim]]) : tensor<?x?x56x2x32xf32>
+//      CHECK:   %[[PACK:.+]] = tensor.pack %[[ARG0]]
+// CHECK-SAME:      outer_dims_perm = [2, 1, 0, 3]
+// CHECK-SAME:      inner_dims_pos = [3] inner_tiles = [32] 
+// CHECK-SAME:       into %[[INIT]]
+//      CHECK:   return %[[PACK]]
+
+// -----
+
+func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_and_tile_dims(%arg0: tensor<56x?x?x128xf32>) -> tensor<?x?x56x9x32x8x2xf32> {
+  %0 = tensor.empty() : tensor<56x9x12x4x8x2x32xf32>
+  %pack = tensor.pack %arg0
+    inner_dims_pos = [1, 2, 3]
+    inner_tiles = [8, 2, 32]
+    into %0 : tensor<56x?x?x128xf32> -> tensor<56x9x12x4x8x2x32xf32>
+
+  %1 = tensor.empty() : tensor<12x4x56x9x32x8x2xf32>
+  %transposed = linalg.transpose
+    ins(%pack : tensor<56x9x12x4x8x2x32xf32>)
+    outs(%1 : tensor<12x4x56x9x32x8x2xf32>)
+    permutation = [2, 3, 0, 1, 6, 4, 5]
+  
+  %return_value = tensor.cast %transposed : tensor<12x4x56x9x32x8x2xf32> to tensor<?x?x56x9x32x8x2xf32> 
+  return %return_value : tensor<?x?x56x9x32x8x2xf32>
+}
+//      CHECK: #[[map:.+]] = affine_map<()[s0] -> (s0 ceildiv 8)>
+//      CHECK: #[[map1:.+]] = affine_map<()[s0] -> (s0 ceildiv 2)>
+//      CHECK: module {
+//      CHECK:   func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_and_tile_dims(
+// CHECK-SAME:   %[[ARG0:.+]]: tensor<56x?x?x128xf32>) 
+//      CHECK:     %[[c1:.+]] = arith.constant 1 : index
+//      CHECK:     %[[c2:.+]] = arith.constant 2 : index
+//      CHECK:     %[[dim:.+]] = tensor.dim %[[ARG0]], %[[c1]] : tensor<56x?x?x128xf32>
+//      CHECK:     %[[dim_0:.+]] = tensor.dim %[[ARG0]], %[[c2]] : tensor<56x?x?x128xf32>
+//      CHECK:     %[[mapped_dim1:.+]] = affine.apply #[[map:.+]]()[%[[dim]]]
+//      CHECK:     %[[mapped_dim2:.+]] = affine.apply #[[map1:.+]]()[%[[dim_0]]]
+//      CHECK:     %[[INIT:.+]] = tensor.empty(%[[mapped_dim2]], %[[mapped_dim1]]) : tensor<?x4x56x?x32x8x2xf32>
+//      CHECK:     %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [2, 3, 0, 1] inner_dims_pos = [3, 1, 2] inner_tiles = [32, 8, 2] into %[[INIT]] : tensor<56x?x?x128xf32> -> tensor<?x4x56x?x32x8x2xf32>
+//      CHECK:     %[[CAST:.+]] = tensor.cast %[[PACK]] : tensor<?x4x56x?x32x8x2xf32> to tensor<?x?x56x9x32x8x2xf32>
+//      CHECK:     return %[[CAST]] : tensor<?x?x56x9x32x8x2xf32>
+//      CHECK:   }
+
+// -----
+
+func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_dims_tile_dims_tile_sizes(%arg0: tensor<?x?x?x?xf32>, %pack_dest: tensor<?x?x?x?x?x?x?xf32>, %transpose_dest: tensor<?x?x?x?x?x?x?xf32>, %tile_p : index, %tile_q : index, %tile_r : index) -> tensor<?x?x?x?x?x?x?xf32> {
+  %pack = tensor.pack %arg0
+    outer_dims_perm = [3, 0, 2, 1]
+    inner_dims_pos = [1, 2, 3]
+    inner_tiles = [%tile_p, %tile_q, %tile_r]
+    into %pack_dest : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
+
+  %transposed = linalg.transpose
+    ins(%pack : tensor<?x?x?x?x?x?x?xf32>)
+    outs(%transpose_dest : tensor<?x?x?x?x?x?x?xf32>)
+    permutation = [2, 3, 0, 1, 6, 4, 5]
+
+  return %transposed : tensor<?x?x?x?x?x?x?xf32>
+}
+//      CHECK: #[[map:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
+//      CHECK: module {
+//      CHECK:   func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_dims_tile_dims_tile_sizes(
+// CHECK-SAME:   %[[ARG0:.+]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME:   %[[PACK_DEST:.+]]: tensor<?x?x?x?x?x?x?xf32>, %[[TRANSPOSE_DEST:.+]]: tensor<?x?x?x?x?x?x?xf32>,
+// CHECK-SAME:   %[[ARG1:.+]]: index, %[[ARG2:.+]]: index,
+// CHECK-SAME:   %[[ARG3:.+]]: index) 
+//      CHECK:     %[[c0:.+]] = arith.constant 0 : index
+//      CHECK:     %[[c1:.+]] = arith.constant 1 : index
+//      CHECK:     %[[c2:.+]] = arith.constant 2 : index
+//      CHECK:     %[[c3:.+]] = arith.constant 3 : index
+//      CHECK:     %[[dim:.+]] = tensor.dim %[[ARG0]], %[[c0]] : tensor<?x?x?x?xf32>
+//      CHECK:     %[[dim_0:.+]] = tensor.dim %[[ARG0]], %[[c1]] : tensor<?x?x?x?xf32>
+//      CHECK:     %[[dim_1:.+]] = tensor.dim %[[ARG0]], %[[c2]] : tensor<?x?x?x?xf32>
+//      CHECK:     %[[dim_2:.+]] = tensor.dim %[[ARG0]], %[[c3]] : tensor<?x?x?x?xf32>
+//      CHECK:     %[[mapped_dim0:.+]] = affine.apply #[[map:.+]]()[%[[dim_2]], %[[ARG3]]]
+//      CHECK:     %[[mapped_dim1:.+]] = affine.apply #[[map:.+]]()[%[[dim_0]], %[[ARG1]]]
+//      CHECK:     %[[mapped_dim2:.+]] = affine.apply #[[map:.+]]()[%[[dim_1]], %[[ARG2]]]
+//      CHECK:     %[[INIT:.+]] = tensor.empty(%[[mapped_dim2]], %[[mapped_dim1]], %[[mapped_dim0]], %[[dim]], %[[ARG3]], %[[ARG1]], %[[ARG2]]) : tensor<?x?x?x?x?x?x?xf32>
+//      CHECK:     %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [2, 1, 3, 0] inner_dims_pos = [3, 1, 2] inner_tiles = [%[[ARG3]], %[[ARG1]], %[[ARG2]]] into %[[INIT]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
+//      CHECK:     return %[[PACK]] : tensor<?x?x?x?x?x?x?xf32>
+//      CHECK:   }


        

