[Mlir-commits] [mlir] [mlir][tensor] Fold producer linalg transpose with consumer tensor pack (PR #75658)
Prathamesh Tagore
llvmlistbot at llvm.org
Wed Jan 3 09:39:07 PST 2024
https://github.com/meshtag updated https://github.com/llvm/llvm-project/pull/75658
From c7706210742e65deef4afea9a0caaf0715b152b6 Mon Sep 17 00:00:00 2001
From: meshtag <prathameshtagore at gmail.com>
Date: Fri, 15 Dec 2023 07:42:19 +0000
Subject: [PATCH 1/8] Add support for folding consumer pack with producer
transpose
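
When the transpose is the producer, the folded pack's outer_dims_perm is the
composition of the two maps: each index is first remapped through the pack's
outer_dims_perm (if present) and then through the transpose permutation. A
standalone sketch of that composition, using the values from the first new
test added below (illustrative only, not the pattern's actual code):

  // Sketch: compose a producer linalg.transpose permutation with a consumer
  // tensor.pack outer_dims_perm, as done in the transpose-producer fold.
  #include <cstdint>
  #include <cstdio>
  #include <vector>

  int main() {
    // Values from the `linalg_transpose_tensor_pack_fold` test below.
    std::vector<int64_t> transposePerm = {2, 0, 1, 3}; // linalg.transpose
    std::vector<int64_t> outerDimsPerm = {0, 2, 1, 3}; // tensor.pack
    std::vector<int64_t> foldedOuterDimsPerm;
    for (size_t i = 0; i < transposePerm.size(); ++i) {
      int64_t remapped =
          outerDimsPerm.empty() ? static_cast<int64_t>(i) : outerDimsPerm[i];
      foldedOuterDimsPerm.push_back(transposePerm[remapped]);
    }
    for (int64_t d : foldedOuterDimsPerm)
      std::printf("%lld ", static_cast<long long>(d)); // prints: 2 1 0 3
    return 0;
  }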
---
.../FoldIntoPackAndUnpackPatterns.cpp | 132 ++++++++++++++----
.../Tensor/fold-into-pack-and-unpack.mlir | 115 +++++++++++++++
2 files changed, 217 insertions(+), 30 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
index e4509b331beeac..d9dc365c7e85cd 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
@@ -21,6 +21,57 @@ static bool areAllConstantIntValue(ArrayRef<OpFoldResult> ofrs, int64_t value) {
ofrs, [&](OpFoldResult ofr) { return isConstantIntValue(ofr, value); });
}
+/// Helper function to generate an equivalent permutation map for
+/// `linalg.transpose` and `tensor.pack` which will be used after their folding
+/// into a `tensor.pack`.
+static bool getRemappedPermutationForTransposeAndPack(
+ PackOp packOp, linalg::TransposeOp transposeOp,
+ SmallVector<int64_t> &newOuterDimsPermVec,
+ SmallVector<int64_t> &newInnerDimsPosVec,
+ SmallVector<OpFoldResult> &newMixedInnerTilesVec,
+ bool isTransposeProducer) {
+ bool foldingPossible = true;
+ auto innerDimsPos = packOp.getInnerDimsPos();
+ auto mixedInnerTiles = packOp.getMixedTiles();
+ auto outerDimsPerm = packOp.getOuterDimsPerm();
+ auto transposePerm = transposeOp.getPermutation();
+ int64_t srcRank = packOp.getSourceRank();
+
+ // Note: if isTransposeProducer = true, transposePerm.size() = srcRank, else
+ // transposePerm.size() > srcRank
+
+ // Process transpose operation for non-tiled outer dimensions
+ for (unsigned int i = 0; i < srcRank; ++i) {
+ int64_t remappedPosition =
+ isTransposeProducer ? (!outerDimsPerm.empty() ? outerDimsPerm[i] : i)
+ : transposePerm[i];
+
+ if (remappedPosition >= srcRank) {
+ foldingPossible = false;
+ return foldingPossible;
+ }
+
+ remappedPosition =
+ isTransposeProducer
+ ? transposePerm[remappedPosition]
+ : (!outerDimsPerm.empty() ? outerDimsPerm[remappedPosition]
+ : remappedPosition);
+
+ newOuterDimsPermVec.push_back(remappedPosition);
+ }
+
+ // Process transpose operation for tiled inner dimensions
+ for (unsigned int i = srcRank; i < srcRank + mixedInnerTiles.size(); ++i) {
+ int64_t remappedPosition =
+ isTransposeProducer ? i - srcRank : transposePerm[i] - srcRank;
+
+ newMixedInnerTilesVec.push_back(mixedInnerTiles[remappedPosition]);
+ newInnerDimsPosVec.push_back(innerDimsPos[remappedPosition]);
+ }
+
+ return foldingPossible;
+}
+
/// Fold a `pad` -> `pack` into `pack` if they have the same padding values and
/// the pad op has zero low paddings, or if `pack` has no padding values.
struct FoldPadWithPackOp : public OpRewritePattern<PackOp> {
@@ -96,39 +147,19 @@ struct FoldProducerPackWithConsumerLinalgTransposeOp
if (!packOp)
return failure();
- auto innerDimsPos = packOp.getInnerDimsPos();
- auto mixedInnerTiles = packOp.getMixedTiles();
- auto outerDimsPerm = packOp.getOuterDimsPerm();
- auto transposePerm = transposeOp.getPermutation();
SmallVector<int64_t> newOuterDimsPermVec;
SmallVector<int64_t> newInnerDimsPosVec;
SmallVector<OpFoldResult> newMixedInnerTilesVec;
- int64_t srcRank = packOp.getSourceRank();
-
- // Process transpose operation for non-tiled outer dimensions
- for (unsigned int i = 0; i < srcRank; ++i) {
- int64_t remappedPosition = transposePerm[i];
-
- // If tensor.pack has outer_dims_perm attribute, then consider it during
- // index remapping.
- if (!outerDimsPerm.empty()) {
- if (transposePerm[i] >= srcRank) {
- return rewriter.notifyMatchFailure(
- transposeOp,
- "Cannot fold in tensor.pack if a tile dimension was transposed "
- "with a non-tile dimension in linalg.transpose.");
- }
- remappedPosition = outerDimsPerm[remappedPosition];
- }
-
- newOuterDimsPermVec.push_back(remappedPosition);
- }
- // Process transpose operation for tiled inner dimensions
- for (unsigned int i = srcRank; i < transposePerm.size(); ++i) {
- int64_t remappedPosition = transposePerm[i] - srcRank;
- newMixedInnerTilesVec.push_back(mixedInnerTiles[remappedPosition]);
- newInnerDimsPosVec.push_back(innerDimsPos[remappedPosition]);
+ bool foldingPossible = getRemappedPermutationForTransposeAndPack(
+ packOp, transposeOp, newOuterDimsPermVec, newInnerDimsPosVec,
+ newMixedInnerTilesVec, /*isTransposeProducer*/ false);
+
+ if (!foldingPossible) {
+ return rewriter.notifyMatchFailure(
+ transposeOp,
+ "Cannot fold in tensor.pack if a tile dimension was transposed "
+ "with a non-tile dimension in linalg.transpose.");
}
Value output = packOp.createDestinationTensor(
@@ -142,11 +173,52 @@ struct FoldProducerPackWithConsumerLinalgTransposeOp
return success();
}
};
+
+/// Fold 'transpose' -> 'pack' into 'pack' since 'pack' already has transpose
+/// semantics.
+struct FoldConsumerPackWithProducerLinalgTransposeOp
+ : public OpRewritePattern<PackOp> {
+ using OpRewritePattern<PackOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(PackOp packOp,
+ PatternRewriter &rewriter) const override {
+ auto transposeOp = packOp.getSource().getDefiningOp<linalg::TransposeOp>();
+
+ if (!transposeOp)
+ return failure();
+
+ SmallVector<int64_t> newOuterDimsPermVec;
+ SmallVector<int64_t> newInnerDimsPosVec;
+ SmallVector<OpFoldResult> newMixedInnerTilesVec;
+
+ bool foldingPossible = getRemappedPermutationForTransposeAndPack(
+ packOp, transposeOp, newOuterDimsPermVec, newInnerDimsPosVec,
+ newMixedInnerTilesVec, /*isTransposeProducer*/ true);
+
+ if (!foldingPossible) {
+ return rewriter.notifyMatchFailure(
+ transposeOp,
+ "Cannot fold in tensor.pack if a tile dimension was transposed "
+ "with a non-tile dimension in linalg.transpose.");
+ }
+
+ Value output = packOp.createDestinationTensor(
+ rewriter, packOp.getLoc(), transposeOp.getOperand(0),
+ newMixedInnerTilesVec, newInnerDimsPosVec, newOuterDimsPermVec);
+
+ rewriter.replaceOpWithNewOp<PackOp>(
+ packOp, transposeOp.getOperand(0), output, newInnerDimsPosVec,
+ newMixedInnerTilesVec, packOp.getPaddingValue(), newOuterDimsPermVec);
+
+ return success();
+ }
+};
} // namespace
void populateFoldIntoPackAndUnpackPatterns(RewritePatternSet &patterns) {
patterns.insert<FoldUnpackWithExtractSliceOp, FoldPadWithPackOp,
- FoldProducerPackWithConsumerLinalgTransposeOp>(
+ FoldProducerPackWithConsumerLinalgTransposeOp,
+ FoldConsumerPackWithProducerLinalgTransposeOp>(
patterns.getContext());
}
diff --git a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
index ca4eb4ff679445..ed101883a40f9a 100644
--- a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
@@ -345,3 +345,118 @@ func.func @tensor_pack_linalg_transpose_fold_dynamic_outer_dims_tile_dims_tile_s
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [2, 1, 3, 0] inner_dims_pos = [3, 1, 2] inner_tiles = [%[[ARG3]], %[[ARG1]], %[[ARG2]]] into %[[INIT]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
// CHECK: return %[[PACK]] : tensor<?x?x?x?x?x?x?xf32>
// CHECK: }
+
+// -----
+
+func.func @linalg_transpose_tensor_pack_fold(%arg0: tensor<56x57x1x64xf32>) -> tensor<1x57x56x2x32xf32> {
+ %0 = tensor.empty() : tensor<1x56x57x64xf32>
+ %transposed = linalg.transpose
+ ins(%arg0 : tensor<56x57x1x64xf32>)
+ outs(%0 : tensor<1x56x57x64xf32>)
+ permutation = [2, 0, 1, 3]
+
+ %1 = tensor.empty() : tensor<1x57x56x2x32xf32>
+ %pack = tensor.pack %transposed
+ outer_dims_perm = [0, 2, 1, 3]
+ inner_dims_pos = [3]
+ inner_tiles = [32]
+ into %1 : tensor<1x56x57x64xf32> -> tensor<1x57x56x2x32xf32>
+ return %pack : tensor<1x57x56x2x32xf32>
+}
+// CHECK: func @linalg_transpose_tensor_pack_fold(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x64xf32>)
+// CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x57x56x2x32xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]]
+// CHECK-SAME: outer_dims_perm = [2, 1, 0, 3]
+// CHECK-SAME: inner_dims_pos = [3] inner_tiles = [32]
+// CHECK-SAME: into %[[INIT]]
+// CHECK: return %[[PACK]]
+
+// -----
+
+func.func @linalg_transpose_tensor_pack_fold_with_padding(%arg0: tensor<56x57x1x55xf32>, %padding: f32) -> tensor<1x57x56x2x32xf32> {
+ %0 = tensor.empty() : tensor<1x56x57x55xf32>
+ %transpose = linalg.transpose
+ ins(%arg0 : tensor<56x57x1x55xf32>)
+ outs(%0 : tensor<1x56x57x55xf32>)
+ permutation = [2, 0, 1, 3]
+
+ %1 = tensor.empty() : tensor<1x57x56x2x32xf32>
+ %pack = tensor.pack %transpose padding_value(%padding : f32)
+ outer_dims_perm = [0, 2, 1, 3]
+ inner_dims_pos = [3]
+ inner_tiles = [32]
+ into %1 : tensor<1x56x57x55xf32> -> tensor<1x57x56x2x32xf32>
+ return %pack : tensor<1x57x56x2x32xf32>
+}
+// CHECK: func @linalg_transpose_tensor_pack_fold_with_padding(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x55xf32>, %[[PADDING:.+]]: f32)
+// CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x57x56x2x32xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] padding_value(%[[PADDING]] : f32)
+// CHECK-SAME: outer_dims_perm = [2, 1, 0, 3]
+// CHECK-SAME: inner_dims_pos = [3] inner_tiles = [32]
+// CHECK-SAME: into %[[INIT]]
+// CHECK: return %[[PACK]]
+
+// -----
+
+func.func @linalg_transpose_tensor_pack_fold_no_outer_dims_perm(%arg0: tensor<56x57x1x64xf32>) -> tensor<1x56x57x2x32xf32> {
+ %0 = tensor.empty() : tensor<1x56x57x64xf32>
+ %transposed = linalg.transpose
+ ins(%arg0 : tensor<56x57x1x64xf32>)
+ outs(%0 : tensor<1x56x57x64xf32>)
+ permutation = [2, 0, 1, 3]
+
+ %1 = tensor.empty() : tensor<1x56x57x2x32xf32>
+ %pack = tensor.pack %transposed
+ inner_dims_pos = [3]
+ inner_tiles = [32]
+ into %1 : tensor<1x56x57x64xf32> -> tensor<1x56x57x2x32xf32>
+ return %pack : tensor<1x56x57x2x32xf32>
+}
+// CHECK: func @linalg_transpose_tensor_pack_fold_no_outer_dims_perm(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x64xf32>)
+// CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x56x57x2x32xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]]
+// CHECK-SAME: outer_dims_perm = [2, 0, 1, 3]
+// CHECK-SAME: inner_dims_pos = [3] inner_tiles = [32]
+// CHECK-SAME: into %[[INIT]]
+// CHECK: return %[[PACK]]
+
+// -----
+
+func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_sizes(%arg0: tensor<?x?x?x?xf32>, %transpose_dest: tensor<?x?x?x?xf32>, %pack_dest: tensor<?x?x?x?x?x?x?xf32>, %tile_p : index, %tile_q : index, %tile_r : index) -> tensor<?x?x?x?x?x?x?xf32> {
+ %transposed = linalg.transpose
+ ins(%arg0 : tensor<?x?x?x?xf32>)
+ outs(%transpose_dest : tensor<?x?x?x?xf32>)
+ permutation = [2, 3, 0, 1]
+
+ %pack = tensor.pack %transposed
+ outer_dims_perm = [3, 0, 2, 1]
+ inner_dims_pos = [1, 3, 2]
+ inner_tiles = [%tile_p, %tile_q, %tile_r]
+ into %pack_dest : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
+ return %pack : tensor<?x?x?x?x?x?x?xf32>
+}
+// CHECK: #[[map:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
+// CHECK: module {
+// CHECK: func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_sizes(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?x?xf32>, %[[TRANSPOSE_DEST:.+]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME: %[[PACK_DEST:.+]]: tensor<?x?x?x?x?x?x?xf32>,
+// CHECK-SAME: %[[ARG1:.+]]: index, %[[ARG2:.+]]: index,
+// CHECK-SAME: %[[ARG3:.+]]: index)
+// CHECK: %[[c0:.+]] = arith.constant 0 : index
+// CHECK: %[[c1:.+]] = arith.constant 1 : index
+// CHECK: %[[c2:.+]] = arith.constant 2 : index
+// CHECK: %[[c3:.+]] = arith.constant 3 : index
+// CHECK: %[[dim:.+]] = tensor.dim %[[ARG0]], %[[c0]] : tensor<?x?x?x?xf32>
+// CHECK: %[[dim_0:.+]] = tensor.dim %[[ARG0]], %[[c1]] : tensor<?x?x?x?xf32>
+// CHECK: %[[dim_1:.+]] = tensor.dim %[[ARG0]], %[[c2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[dim_2:.+]] = tensor.dim %[[ARG0]], %[[c3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[mapped_dim0:.+]] = affine.apply #[[map:.+]]()[%[[dim_0]], %[[ARG1]]]
+// CHECK: %[[mapped_dim1:.+]] = affine.apply #[[map:.+]]()[%[[dim_2]], %[[ARG2]]]
+// CHECK: %[[mapped_dim2:.+]] = affine.apply #[[map:.+]]()[%[[dim_1]], %[[ARG3]]]
+// CHECK: %[[INIT:.+]] = tensor.empty(%[[mapped_dim0]], %[[mapped_dim2]], %[[dim]], %[[mapped_dim1]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : tensor<?x?x?x?x?x?x?xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [1, 2, 0, 3] inner_dims_pos = [1, 3, 2] inner_tiles = [%[[ARG1]], %[[ARG2]], %[[ARG3]]] into %[[INIT]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
+// CHECK: return %[[PACK]] : tensor<?x?x?x?x?x?x?xf32>
+// CHECK: }
From 9b40138f3df4adc0a9874251da026f3e61790937 Mon Sep 17 00:00:00 2001
From: meshtag <prathameshtagore at gmail.com>
Date: Fri, 15 Dec 2023 21:19:15 +0000
Subject: [PATCH 2/8] Rectify incorrect failure message
---
.../Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
index d9dc365c7e85cd..2c45cd3500fa94 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
@@ -195,12 +195,8 @@ struct FoldConsumerPackWithProducerLinalgTransposeOp
packOp, transposeOp, newOuterDimsPermVec, newInnerDimsPosVec,
newMixedInnerTilesVec, /*isTransposeProducer*/ true);
- if (!foldingPossible) {
- return rewriter.notifyMatchFailure(
- transposeOp,
- "Cannot fold in tensor.pack if a tile dimension was transposed "
- "with a non-tile dimension in linalg.transpose.");
- }
+ if (!foldingPossible)
+ return failure();
Value output = packOp.createDestinationTensor(
rewriter, packOp.getLoc(), transposeOp.getOperand(0),
From 425ef2ae2be813707ef7b279d7802910954e0b7d Mon Sep 17 00:00:00 2001
From: meshtag <prathameshtagore at gmail.com>
Date: Wed, 20 Dec 2023 07:31:24 +0000
Subject: [PATCH 3/8] Use applyPermutationToVector utility
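
For reference, applyPermutationToVector (declared in
mlir/Dialect/Utils/IndexingUtils.h, which this patch now includes) is assumed
to rewrite the vector in place so that element i becomes the old element
permutation[i]. A plain-C++ sketch of that assumed behaviour, checked against
the first transpose-producer test in the file (sketch only, not the MLIR
implementation):

  // Sketch of the assumed applyPermutationToVector semantics:
  // newVec[i] = oldVec[perm[i]].
  #include <cassert>
  #include <cstdint>
  #include <vector>

  static void applyPermutationToVectorSketch(std::vector<int64_t> &vec,
                                             const std::vector<int64_t> &perm) {
    std::vector<int64_t> result;
    result.reserve(vec.size());
    for (int64_t idx : perm)
      result.push_back(vec[idx]);
    vec = std::move(result);
  }

  int main() {
    // Transpose permutation remapped by the pack's outer_dims_perm, as in the
    // fold for the `linalg_transpose_tensor_pack_fold` test.
    std::vector<int64_t> newOuterDimsPerm = {2, 0, 1, 3};
    applyPermutationToVectorSketch(newOuterDimsPerm, {0, 2, 1, 3});
    assert((newOuterDimsPerm == std::vector<int64_t>{2, 1, 0, 3}));
    return 0;
  }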
---
.../FoldIntoPackAndUnpackPatterns.cpp | 111 ++++++------------
1 file changed, 39 insertions(+), 72 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
index 2c45cd3500fa94..986ae2e66b5fbc 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
@@ -9,6 +9,7 @@
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
+#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
@@ -21,57 +22,6 @@ static bool areAllConstantIntValue(ArrayRef<OpFoldResult> ofrs, int64_t value) {
ofrs, [&](OpFoldResult ofr) { return isConstantIntValue(ofr, value); });
}
-/// Helper function to generate an equivalent permutation map for
-/// `linalg.transpose` and `tensor.pack` which will be used after their folding
-/// into a `tensor.pack`.
-static bool getRemappedPermutationForTransposeAndPack(
- PackOp packOp, linalg::TransposeOp transposeOp,
- SmallVector<int64_t> &newOuterDimsPermVec,
- SmallVector<int64_t> &newInnerDimsPosVec,
- SmallVector<OpFoldResult> &newMixedInnerTilesVec,
- bool isTransposeProducer) {
- bool foldingPossible = true;
- auto innerDimsPos = packOp.getInnerDimsPos();
- auto mixedInnerTiles = packOp.getMixedTiles();
- auto outerDimsPerm = packOp.getOuterDimsPerm();
- auto transposePerm = transposeOp.getPermutation();
- int64_t srcRank = packOp.getSourceRank();
-
- // Note: if isTransposeProducer = true, transposePerm.size() = srcRank, else
- // transposePerm.size() > srcRank
-
- // Process transpose operation for non-tiled outer dimensions
- for (unsigned int i = 0; i < srcRank; ++i) {
- int64_t remappedPosition =
- isTransposeProducer ? (!outerDimsPerm.empty() ? outerDimsPerm[i] : i)
- : transposePerm[i];
-
- if (remappedPosition >= srcRank) {
- foldingPossible = false;
- return foldingPossible;
- }
-
- remappedPosition =
- isTransposeProducer
- ? transposePerm[remappedPosition]
- : (!outerDimsPerm.empty() ? outerDimsPerm[remappedPosition]
- : remappedPosition);
-
- newOuterDimsPermVec.push_back(remappedPosition);
- }
-
- // Process transpose operation for tiled inner dimensions
- for (unsigned int i = srcRank; i < srcRank + mixedInnerTiles.size(); ++i) {
- int64_t remappedPosition =
- isTransposeProducer ? i - srcRank : transposePerm[i] - srcRank;
-
- newMixedInnerTilesVec.push_back(mixedInnerTiles[remappedPosition]);
- newInnerDimsPosVec.push_back(innerDimsPos[remappedPosition]);
- }
-
- return foldingPossible;
-}
-
/// Fold a `pad` -> `pack` into `pack` if they have the same padding values and
/// the pad op has zero low paddings, or if `pack` has no padding values.
struct FoldPadWithPackOp : public OpRewritePattern<PackOp> {
@@ -147,19 +97,39 @@ struct FoldProducerPackWithConsumerLinalgTransposeOp
if (!packOp)
return failure();
+ auto innerDimsPos = packOp.getInnerDimsPos();
+ auto mixedInnerTiles = packOp.getMixedTiles();
+ auto outerDimsPerm = packOp.getOuterDimsPerm();
+ auto transposePerm = transposeOp.getPermutation();
SmallVector<int64_t> newOuterDimsPermVec;
SmallVector<int64_t> newInnerDimsPosVec;
SmallVector<OpFoldResult> newMixedInnerTilesVec;
+ int64_t srcRank = packOp.getSourceRank();
+
+ // Process transpose operation for non-tiled outer dimensions
+ for (unsigned int i = 0; i < srcRank; ++i) {
+ int64_t remappedPosition = transposePerm[i];
+
+ // If tensor.pack has outer_dims_perm attribute, then consider it during
+ // index remapping.
+ if (!outerDimsPerm.empty()) {
+ if (transposePerm[i] >= srcRank) {
+ return rewriter.notifyMatchFailure(
+ transposeOp,
+ "Cannot fold in tensor.pack if a tile dimension was transposed "
+ "with a non-tile dimension in linalg.transpose.");
+ }
+ remappedPosition = outerDimsPerm[remappedPosition];
+ }
+
+ newOuterDimsPermVec.push_back(remappedPosition);
+ }
- bool foldingPossible = getRemappedPermutationForTransposeAndPack(
- packOp, transposeOp, newOuterDimsPermVec, newInnerDimsPosVec,
- newMixedInnerTilesVec, /*isTransposeProducer*/ false);
-
- if (!foldingPossible) {
- return rewriter.notifyMatchFailure(
- transposeOp,
- "Cannot fold in tensor.pack if a tile dimension was transposed "
- "with a non-tile dimension in linalg.transpose.");
+ // Process transpose operation for tiled inner dimensions
+ for (unsigned int i = srcRank; i < transposePerm.size(); ++i) {
+ int64_t remappedPosition = transposePerm[i] - srcRank;
+ newMixedInnerTilesVec.push_back(mixedInnerTiles[remappedPosition]);
+ newInnerDimsPosVec.push_back(innerDimsPos[remappedPosition]);
}
Value output = packOp.createDestinationTensor(
@@ -187,24 +157,21 @@ struct FoldConsumerPackWithProducerLinalgTransposeOp
if (!transposeOp)
return failure();
- SmallVector<int64_t> newOuterDimsPermVec;
- SmallVector<int64_t> newInnerDimsPosVec;
- SmallVector<OpFoldResult> newMixedInnerTilesVec;
-
- bool foldingPossible = getRemappedPermutationForTransposeAndPack(
- packOp, transposeOp, newOuterDimsPermVec, newInnerDimsPosVec,
- newMixedInnerTilesVec, /*isTransposeProducer*/ true);
+ auto outerDimsPerm = packOp.getOuterDimsPerm();
+ SmallVector<int64_t> newOuterDimsPermVec =
+ static_cast<SmallVector<int64_t>>(transposeOp.getPermutation());
- if (!foldingPossible)
- return failure();
+ if (!outerDimsPerm.empty()) {
+ applyPermutationToVector(newOuterDimsPermVec, outerDimsPerm);
+ }
Value output = packOp.createDestinationTensor(
rewriter, packOp.getLoc(), transposeOp.getOperand(0),
- newMixedInnerTilesVec, newInnerDimsPosVec, newOuterDimsPermVec);
+ packOp.getMixedTiles(), packOp.getInnerDimsPos(), newOuterDimsPermVec);
rewriter.replaceOpWithNewOp<PackOp>(
- packOp, transposeOp.getOperand(0), output, newInnerDimsPosVec,
- newMixedInnerTilesVec, packOp.getPaddingValue(), newOuterDimsPermVec);
+ packOp, transposeOp.getOperand(0), output, packOp.getInnerDimsPos(),
+ packOp.getMixedTiles(), packOp.getPaddingValue(), newOuterDimsPermVec);
return success();
}
From 9dd0e8b5f0ca83df19401558459121bfe3a7dba6 Mon Sep 17 00:00:00 2001
From: meshtag <prathameshtagore at gmail.com>
Date: Wed, 20 Dec 2023 07:40:59 +0000
Subject: [PATCH 4/8] Add negative test
---
.../Tensor/fold-into-pack-and-unpack.mlir | 23 +++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
index ed101883a40f9a..3c8dc01e7c5061 100644
--- a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
@@ -460,3 +460,26 @@ func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_s
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [1, 2, 0, 3] inner_dims_pos = [1, 3, 2] inner_tiles = [%[[ARG1]], %[[ARG2]], %[[ARG3]]] into %[[INIT]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
// CHECK: return %[[PACK]] : tensor<?x?x?x?x?x?x?xf32>
// CHECK: }
+
+// -----
+
+func.func @linalg_transpose_tensor_cast_tensor_pack_fold(%arg0: tensor<56x57x1x64xf32>) -> tensor<1x57x56x2x32xf32> {
+ %0 = tensor.empty() : tensor<1x56x57x64xf32>
+ %transposed = linalg.transpose
+ ins(%arg0 : tensor<56x57x1x64xf32>)
+ outs(%0 : tensor<1x56x57x64xf32>)
+ permutation = [2, 0, 1, 3]
+
+ %transposed_cast = tensor.cast %transposed : tensor<1x56x57x64xf32> to tensor<?x56x57x64xf32>
+ %1 = tensor.empty() : tensor<1x57x56x2x32xf32>
+ %pack = tensor.pack %transposed_cast
+ outer_dims_perm = [0, 2, 1, 3]
+ inner_dims_pos = [3]
+ inner_tiles = [32]
+ into %1 : tensor<?x56x57x64xf32> -> tensor<1x57x56x2x32xf32>
+ return %pack : tensor<1x57x56x2x32xf32>
+}
+// CHECK: func @linalg_transpose_tensor_cast_tensor_pack_fold(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x64xf32>)
+// CHECK: linalg.transpose
+// CHECK: tensor.pack
From 069dc64d6479ca2360cafed01217c69089ea121e Mon Sep 17 00:00:00 2001
From: Prathamesh Tagore <prathameshtagore at gmail.com>
Date: Sun, 31 Dec 2023 19:50:16 +0530
Subject: [PATCH 5/8] Handle the case when inner_dims_pos was transposed
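
The new remapping replaces each pack inner_dims_pos entry with the index at
which that dimension appears in the transpose permutation. A standalone sketch
using the values from the new
`linalg_transpose_tensor_pack_fold_complex_inner_dims_change` test
(illustrative only; the pattern itself uses the llvm/MLIR utilities):

  // Sketch: remap inner_dims_pos through the producer transpose permutation
  // by looking up where each dimension occurs in the permutation.
  #include <cstdint>
  #include <cstdio>
  #include <vector>

  int main() {
    std::vector<int64_t> transposePerm = {2, 3, 0, 1}; // linalg.transpose
    std::vector<int64_t> innerDimsPos = {1, 3, 2};     // tensor.pack
    std::vector<int64_t> newInnerDimsPos;
    for (int64_t dim : innerDimsPos) {
      for (size_t i = 0; i < transposePerm.size(); ++i) {
        if (transposePerm[i] == dim) {
          newInnerDimsPos.push_back(static_cast<int64_t>(i));
          break;
        }
      }
    }
    for (int64_t d : newInnerDimsPos)
      std::printf("%lld ", static_cast<long long>(d)); // prints: 3 1 0
    return 0;
  }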
---
.../FoldIntoPackAndUnpackPatterns.cpp | 16 +++--
.../Tensor/fold-into-pack-and-unpack.mlir | 63 ++++++++++++++-----
2 files changed, 58 insertions(+), 21 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
index 986ae2e66b5fbc..d55428e4ec5838 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
@@ -157,20 +157,28 @@ struct FoldConsumerPackWithProducerLinalgTransposeOp
if (!transposeOp)
return failure();
+ auto transposePermutation = transposeOp.getPermutation();
auto outerDimsPerm = packOp.getOuterDimsPerm();
- SmallVector<int64_t> newOuterDimsPermVec =
- static_cast<SmallVector<int64_t>>(transposeOp.getPermutation());
+ auto innerDimsPos = packOp.getInnerDimsPos();
+ SmallVector<int64_t> newInnerDimsPosVec;
+ SmallVector<int64_t> newOuterDimsPermVec = to_vector(transposePermutation);
if (!outerDimsPerm.empty()) {
applyPermutationToVector(newOuterDimsPermVec, outerDimsPerm);
}
+ for (auto dim : innerDimsPos) {
+ newInnerDimsPosVec.push_back(std::find(transposePermutation.begin(),
+ transposePermutation.end(), dim) -
+ transposePermutation.begin());
+ }
+
Value output = packOp.createDestinationTensor(
rewriter, packOp.getLoc(), transposeOp.getOperand(0),
- packOp.getMixedTiles(), packOp.getInnerDimsPos(), newOuterDimsPermVec);
+ packOp.getMixedTiles(), newInnerDimsPosVec, newOuterDimsPermVec);
rewriter.replaceOpWithNewOp<PackOp>(
- packOp, transposeOp.getOperand(0), output, packOp.getInnerDimsPos(),
+ packOp, transposeOp.getOperand(0), output, newInnerDimsPosVec,
packOp.getMixedTiles(), packOp.getPaddingValue(), newOuterDimsPermVec);
return success();
diff --git a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
index 3c8dc01e7c5061..ad3852f3301765 100644
--- a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
@@ -425,6 +425,36 @@ func.func @linalg_transpose_tensor_pack_fold_no_outer_dims_perm(%arg0: tensor<56
// -----
+func.func @linalg_transpose_tensor_pack_fold_complex_inner_dims_change(%arg0: tensor<25x30x35x40xf32>, %transpose_dest: tensor<35x40x25x30xf32>, %pack_dest: tensor<3x35x5x8x5x10x5xf32>) -> tensor<3x35x5x8x5x10x5xf32> {
+ %transposed = linalg.transpose
+ ins(%arg0 : tensor<25x30x35x40xf32>)
+ outs(%transpose_dest : tensor<35x40x25x30xf32>)
+ permutation = [2, 3, 0, 1]
+
+ %pack = tensor.pack %transposed
+ outer_dims_perm = [3, 0, 2, 1]
+ inner_dims_pos = [1, 3, 2]
+ inner_tiles = [5, 10, 5]
+ into %pack_dest : tensor<35x40x25x30xf32> -> tensor<3x35x5x8x5x10x5xf32>
+ return %pack : tensor<3x35x5x8x5x10x5xf32>
+}
+// CHECK: module {
+// CHECK: func.func @linalg_transpose_tensor_pack_fold_complex_inner_dims_change(
+// CHECK-SAME: %[[ARG0:.+]]: tensor<25x30x35x40xf32>,
+// CHECK-SAME: %[[ARG1:.+]]: tensor<35x40x25x30xf32>,
+// CHECK-SAME: %[[ARG2:.+]]: tensor<3x35x5x8x5x10x5xf32>) -> tensor<3x35x5x8x5x10x5xf32> {
+// CHECK: %[[VAL0:.+]] = tensor.empty() : tensor<3x35x5x8x5x10x5xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]]
+// CHECK-SAME: outer_dims_perm = [1, 2, 0, 3]
+// CHECK-SAME: inner_dims_pos = [3, 1, 0]
+// CHECK-SAME: inner_tiles = [5, 10, 5]
+// CHECK-SAME: into %[[VAL0]]
+// CHECK: return %[[PACK]]
+// CHECK: }
+// CHECK: }
+
+// -----
+
func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_sizes(%arg0: tensor<?x?x?x?xf32>, %transpose_dest: tensor<?x?x?x?xf32>, %pack_dest: tensor<?x?x?x?x?x?x?xf32>, %tile_p : index, %tile_q : index, %tile_r : index) -> tensor<?x?x?x?x?x?x?xf32> {
%transposed = linalg.transpose
ins(%arg0 : tensor<?x?x?x?xf32>)
@@ -441,25 +471,24 @@ func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_s
// CHECK: #[[map:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
// CHECK: module {
// CHECK: func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_sizes(
-// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?x?xf32>, %[[TRANSPOSE_DEST:.+]]: tensor<?x?x?x?xf32>,
-// CHECK-SAME: %[[PACK_DEST:.+]]: tensor<?x?x?x?x?x?x?xf32>,
-// CHECK-SAME: %[[ARG1:.+]]: index, %[[ARG2:.+]]: index,
-// CHECK-SAME: %[[ARG3:.+]]: index)
-// CHECK: %[[c0:.+]] = arith.constant 0 : index
-// CHECK: %[[c1:.+]] = arith.constant 1 : index
-// CHECK: %[[c2:.+]] = arith.constant 2 : index
-// CHECK: %[[c3:.+]] = arith.constant 3 : index
-// CHECK: %[[dim:.+]] = tensor.dim %[[ARG0]], %[[c0]] : tensor<?x?x?x?xf32>
-// CHECK: %[[dim_0:.+]] = tensor.dim %[[ARG0]], %[[c1]] : tensor<?x?x?x?xf32>
-// CHECK: %[[dim_1:.+]] = tensor.dim %[[ARG0]], %[[c2]] : tensor<?x?x?x?xf32>
-// CHECK: %[[dim_2:.+]] = tensor.dim %[[ARG0]], %[[c3]] : tensor<?x?x?x?xf32>
-// CHECK: %[[mapped_dim0:.+]] = affine.apply #[[map:.+]]()[%[[dim_0]], %[[ARG1]]]
-// CHECK: %[[mapped_dim1:.+]] = affine.apply #[[map:.+]]()[%[[dim_2]], %[[ARG2]]]
-// CHECK: %[[mapped_dim2:.+]] = affine.apply #[[map:.+]]()[%[[dim_1]], %[[ARG3]]]
-// CHECK: %[[INIT:.+]] = tensor.empty(%[[mapped_dim0]], %[[mapped_dim2]], %[[dim]], %[[mapped_dim1]], %[[ARG1]], %[[ARG2]], %[[ARG3]]) : tensor<?x?x?x?x?x?x?xf32>
-// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [1, 2, 0, 3] inner_dims_pos = [1, 3, 2] inner_tiles = [%[[ARG1]], %[[ARG2]], %[[ARG3]]] into %[[INIT]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?x?xf32>, %[[ARG1:.+]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME: %[[ARG2:.+]]: tensor<?x?x?x?x?x?x?xf32>, %[[ARG3:.+]]: index, %[[ARG4:.+]]: index, %[[ARG5:.+]]: index) -> tensor<?x?x?x?x?x?x?xf32> {
+// CHECK: %[[C0:.+]] = arith.constant 0 : index
+// CHECK: %[[C1:.+]] = arith.constant 1 : index
+// CHECK: %[[C2:.+]] = arith.constant 2 : index
+// CHECK: %[[C3:.+]] = arith.constant 3 : index
+// CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x?x?x?xf32>
+// CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?x?xf32>
+// CHECK: %[[DIM1:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?x?xf32>
+// CHECK: %[[DIM2:.+]] = tensor.dim %[[ARG0]], %[[C3]] : tensor<?x?x?x?xf32>
+// CHECK: %[[VAL0:.+]] = affine.apply #[[map]]()[%[[DIM2]], %[[ARG3]]]
+// CHECK: %[[VAL1:.+]] = affine.apply #[[map]]()[%[[DIM0]], %[[ARG4]]]
+// CHECK: %[[VAL2:.+]] = affine.apply #[[map]]()[%[[DIM]], %[[ARG5]]]
+// CHECK: %[[VAL3:.+]] = tensor.empty(%[[VAL1]], %[[DIM1]], %[[VAL2]], %[[VAL0]], %[[ARG3]], %[[ARG4]], %[[ARG5]]) : tensor<?x?x?x?x?x?x?xf32>
+// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [1, 2, 0, 3] inner_dims_pos = [3, 1, 0] inner_tiles = [%[[ARG3]], %[[ARG4]], %[[ARG5]]] into %[[VAL3]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
// CHECK: return %[[PACK]] : tensor<?x?x?x?x?x?x?xf32>
// CHECK: }
+// CHECK: }
// -----
From ea61e9eddf5e99314df6f6e20109cbf1aeba301a Mon Sep 17 00:00:00 2001
From: Prathamesh Tagore <prathameshtagore at gmail.com>
Date: Sun, 31 Dec 2023 19:58:27 +0530
Subject: [PATCH 6/8] Remove braces around single-statement if block
---
.../Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
index d55428e4ec5838..62bb78caa40ee8 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
@@ -163,9 +163,8 @@ struct FoldConsumerPackWithProducerLinalgTransposeOp
SmallVector<int64_t> newInnerDimsPosVec;
SmallVector<int64_t> newOuterDimsPermVec = to_vector(transposePermutation);
- if (!outerDimsPerm.empty()) {
+ if (!outerDimsPerm.empty())
applyPermutationToVector(newOuterDimsPermVec, outerDimsPerm);
- }
for (auto dim : innerDimsPos) {
newInnerDimsPosVec.push_back(std::find(transposePermutation.begin(),
From 0db52470433adf0641fa1a0fec1063ad707dc3ca Mon Sep 17 00:00:00 2001
From: meshtag <prathameshtagore at gmail.com>
Date: Tue, 2 Jan 2024 18:47:04 +0000
Subject: [PATCH 7/8] Use llvm::find instead of std::find
---
.../Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
index 62bb78caa40ee8..397eec5b5d87b6 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
@@ -167,8 +167,7 @@ struct FoldConsumerPackWithProducerLinalgTransposeOp
applyPermutationToVector(newOuterDimsPermVec, outerDimsPerm);
for (auto dim : innerDimsPos) {
- newInnerDimsPosVec.push_back(std::find(transposePermutation.begin(),
- transposePermutation.end(), dim) -
+ newInnerDimsPosVec.push_back(find(transposePermutation, dim) -
transposePermutation.begin());
}
From cd5491d19d6e775c96f0b21745120542f61ce8ec Mon Sep 17 00:00:00 2001
From: meshtag <prathameshtagore at gmail.com>
Date: Wed, 3 Jan 2024 16:57:08 +0000
Subject: [PATCH 8/8] Address suggested changes
---
.../FoldIntoPackAndUnpackPatterns.cpp | 5 ++--
.../Tensor/fold-into-pack-and-unpack.mlir | 26 +++++++------------
2 files changed, 13 insertions(+), 18 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
index 397eec5b5d87b6..6dcc3c9fd3b187 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/FoldIntoPackAndUnpackPatterns.cpp
@@ -161,13 +161,14 @@ struct FoldConsumerPackWithProducerLinalgTransposeOp
auto outerDimsPerm = packOp.getOuterDimsPerm();
auto innerDimsPos = packOp.getInnerDimsPos();
SmallVector<int64_t> newInnerDimsPosVec;
- SmallVector<int64_t> newOuterDimsPermVec = to_vector(transposePermutation);
+ SmallVector<int64_t> newOuterDimsPermVec =
+ llvm::to_vector(transposePermutation);
if (!outerDimsPerm.empty())
applyPermutationToVector(newOuterDimsPermVec, outerDimsPerm);
for (auto dim : innerDimsPos) {
- newInnerDimsPosVec.push_back(find(transposePermutation, dim) -
+ newInnerDimsPosVec.push_back(llvm::find(transposePermutation, dim) -
transposePermutation.begin());
}
diff --git a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
index ad3852f3301765..9640535753af02 100644
--- a/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/fold-into-pack-and-unpack.mlir
@@ -363,7 +363,7 @@ func.func @linalg_transpose_tensor_pack_fold(%arg0: tensor<56x57x1x64xf32>) -> t
into %1 : tensor<1x56x57x64xf32> -> tensor<1x57x56x2x32xf32>
return %pack : tensor<1x57x56x2x32xf32>
}
-// CHECK: func @linalg_transpose_tensor_pack_fold(
+//CHECK-LABEL: func @linalg_transpose_tensor_pack_fold(
// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x64xf32>)
// CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x57x56x2x32xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]]
@@ -389,7 +389,7 @@ func.func @linalg_transpose_tensor_pack_fold_with_padding(%arg0: tensor<56x57x1x
into %1 : tensor<1x56x57x55xf32> -> tensor<1x57x56x2x32xf32>
return %pack : tensor<1x57x56x2x32xf32>
}
-// CHECK: func @linalg_transpose_tensor_pack_fold_with_padding(
+//CHECK-LABEL: func @linalg_transpose_tensor_pack_fold_with_padding(
// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x55xf32>, %[[PADDING:.+]]: f32)
// CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x57x56x2x32xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] padding_value(%[[PADDING]] : f32)
@@ -414,7 +414,7 @@ func.func @linalg_transpose_tensor_pack_fold_no_outer_dims_perm(%arg0: tensor<56
into %1 : tensor<1x56x57x64xf32> -> tensor<1x56x57x2x32xf32>
return %pack : tensor<1x56x57x2x32xf32>
}
-// CHECK: func @linalg_transpose_tensor_pack_fold_no_outer_dims_perm(
+//CHECK-LABEL: func @linalg_transpose_tensor_pack_fold_no_outer_dims_perm(
// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x64xf32>)
// CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x56x57x2x32xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]]
@@ -438,8 +438,7 @@ func.func @linalg_transpose_tensor_pack_fold_complex_inner_dims_change(%arg0: te
into %pack_dest : tensor<35x40x25x30xf32> -> tensor<3x35x5x8x5x10x5xf32>
return %pack : tensor<3x35x5x8x5x10x5xf32>
}
-// CHECK: module {
-// CHECK: func.func @linalg_transpose_tensor_pack_fold_complex_inner_dims_change(
+//CHECK-LABEL: func.func @linalg_transpose_tensor_pack_fold_complex_inner_dims_change(
// CHECK-SAME: %[[ARG0:.+]]: tensor<25x30x35x40xf32>,
// CHECK-SAME: %[[ARG1:.+]]: tensor<35x40x25x30xf32>,
// CHECK-SAME: %[[ARG2:.+]]: tensor<3x35x5x8x5x10x5xf32>) -> tensor<3x35x5x8x5x10x5xf32> {
@@ -450,8 +449,6 @@ func.func @linalg_transpose_tensor_pack_fold_complex_inner_dims_change(%arg0: te
// CHECK-SAME: inner_tiles = [5, 10, 5]
// CHECK-SAME: into %[[VAL0]]
// CHECK: return %[[PACK]]
-// CHECK: }
-// CHECK: }
// -----
@@ -468,9 +465,8 @@ func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_s
into %pack_dest : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
return %pack : tensor<?x?x?x?x?x?x?xf32>
}
-// CHECK: #[[map:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
-// CHECK: module {
-// CHECK: func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_sizes(
+// CHECK: #[[map:.+]] = affine_map<()[s0, s1] -> (s0 ceildiv s1)>
+//CHECK-LABEL: func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_sizes(
// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?x?xf32>, %[[ARG1:.+]]: tensor<?x?x?x?xf32>,
// CHECK-SAME: %[[ARG2:.+]]: tensor<?x?x?x?x?x?x?xf32>, %[[ARG3:.+]]: index, %[[ARG4:.+]]: index, %[[ARG5:.+]]: index) -> tensor<?x?x?x?x?x?x?xf32> {
// CHECK: %[[C0:.+]] = arith.constant 0 : index
@@ -481,14 +477,12 @@ func.func @linalg_transpose_tensor_pack_fold_dynamic_outer_dims_tile_dims_tile_s
// CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C1]] : tensor<?x?x?x?xf32>
// CHECK: %[[DIM1:.+]] = tensor.dim %[[ARG0]], %[[C2]] : tensor<?x?x?x?xf32>
// CHECK: %[[DIM2:.+]] = tensor.dim %[[ARG0]], %[[C3]] : tensor<?x?x?x?xf32>
-// CHECK: %[[VAL0:.+]] = affine.apply #[[map]]()[%[[DIM2]], %[[ARG3]]]
-// CHECK: %[[VAL1:.+]] = affine.apply #[[map]]()[%[[DIM0]], %[[ARG4]]]
-// CHECK: %[[VAL2:.+]] = affine.apply #[[map]]()[%[[DIM]], %[[ARG5]]]
+// CHECK: %[[VAL0:.+]] = affine.apply #[[map:.+]]()[%[[DIM2]], %[[ARG3]]]
+// CHECK: %[[VAL1:.+]] = affine.apply #[[map:.+]]()[%[[DIM0]], %[[ARG4]]]
+// CHECK: %[[VAL2:.+]] = affine.apply #[[map:.+]]()[%[[DIM]], %[[ARG5]]]
// CHECK: %[[VAL3:.+]] = tensor.empty(%[[VAL1]], %[[DIM1]], %[[VAL2]], %[[VAL0]], %[[ARG3]], %[[ARG4]], %[[ARG5]]) : tensor<?x?x?x?x?x?x?xf32>
// CHECK: %[[PACK:.+]] = tensor.pack %[[ARG0]] outer_dims_perm = [1, 2, 0, 3] inner_dims_pos = [3, 1, 0] inner_tiles = [%[[ARG3]], %[[ARG4]], %[[ARG5]]] into %[[VAL3]] : tensor<?x?x?x?xf32> -> tensor<?x?x?x?x?x?x?xf32>
// CHECK: return %[[PACK]] : tensor<?x?x?x?x?x?x?xf32>
-// CHECK: }
-// CHECK: }
// -----
@@ -508,7 +502,7 @@ func.func @linalg_transpose_tensor_cast_tensor_pack_fold(%arg0: tensor<56x57x1x6
into %1 : tensor<?x56x57x64xf32> -> tensor<1x57x56x2x32xf32>
return %pack : tensor<1x57x56x2x32xf32>
}
-// CHECK: func @linalg_transpose_tensor_cast_tensor_pack_fold(
+//CHECK-LABEL: func @linalg_transpose_tensor_cast_tensor_pack_fold(
// CHECK-SAME: %[[ARG0:.+]]: tensor<56x57x1x64xf32>)
// CHECK: linalg.transpose
// CHECK: tensor.pack