[Mlir-commits] [mlir] [mlir] [linalg] Add canonicalize pattern to swap transpose with broadcast (PR #97063)
donald chen
llvmlistbot at llvm.org
Fri Jun 28 07:36:05 PDT 2024
https://github.com/cxy-1993 created https://github.com/llvm/llvm-project/pull/97063
Add a canonicalization pattern that rewrites
transpose(broadcast(input)) -> broadcast(transpose(input))
so the transpose is performed on the smaller pre-broadcast tensor, reducing its cost.
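To make the index bookkeeping concrete, here is a small standalone C++ sketch (illustrative only, not part of the patch; `removePerm` is a hypothetical stand-in that mirrors the `removePermutation` helper added below) reproducing the permutation and dimension math on the shapes from the first new test case:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Drop every entry of `perm` whose value appears in `removed`, then
    // renumber the survivors into a valid, smaller permutation.
    static std::vector<int64_t> removePerm(const std::vector<int64_t> &perm,
                                           const std::vector<int64_t> &removed) {
      std::vector<int64_t> result;
      for (int64_t value : perm) {
        if (std::find(removed.begin(), removed.end(), value) != removed.end())
          continue; // this dimension is materialized by the broadcast instead
        // Every removed value smaller than `value` shifts it down one slot.
        int64_t shift = std::count_if(removed.begin(), removed.end(),
                                      [&](int64_t r) { return r < value; });
        result.push_back(value - shift);
      }
      return result;
    }

    int main() {
      // Shapes from the broadcast_transpose_fold test below: broadcast
      // dimensions = [0, 2, 5], transpose permutation = [0, 2, 1, 5, 4, 3].
      std::vector<int64_t> perm = {0, 2, 1, 5, 4, 3};
      std::vector<int64_t> bcastDims = {0, 2, 5};

      // Permutation for the new transpose on the pre-broadcast operand.
      std::vector<int64_t> newPerm = removePerm(perm, bcastDims);

      // A broadcast dimension d lands at position invertPerm[d] after the
      // original transpose; that is where the new broadcast materializes it.
      std::vector<int64_t> invertPerm(perm.size());
      for (size_t i = 0; i < perm.size(); ++i)
        invertPerm[perm[i]] = static_cast<int64_t>(i);
      std::vector<int64_t> newDims;
      for (int64_t d : bcastDims)
        newDims.push_back(invertPerm[d]);
      std::sort(newDims.begin(), newDims.end());

      assert((newPerm == std::vector<int64_t>{0, 2, 1}));
      assert((newDims == std::vector<int64_t>{0, 1, 3}));
      return 0;
    }

The asserts encode the expected outcome: the six-dim transpose collapses to permutation = [0, 2, 1] on the 3-D input, and the broadcast moves after it with dimensions = [0, 1, 3], matching the CHECK lines in the test.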
From 701f13326d5c979013e63e99d7861026896f3890 Mon Sep 17 00:00:00 2001
From: cxy <chenxunyu1993 at gmail.com>
Date: Thu, 27 Jun 2024 00:00:03 +0800
Subject: [PATCH] [mlir] [linalg] Add canonicalize pattern to swap transpose
with broadcast
Add a canonicalization pattern that rewrites
transpose(broadcast(input)) -> broadcast(transpose(input))
so the transpose is performed on the smaller pre-broadcast tensor, reducing its cost.
---
.../mlir/Dialect/Utils/IndexingUtils.h | 8 +++
mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp | 61 ++++++++++++++++++-
mlir/lib/Dialect/Utils/IndexingUtils.cpp | 26 ++++++++
mlir/test/Dialect/Linalg/canonicalize.mlir | 53 +++++++++++++++-
4 files changed, 146 insertions(+), 2 deletions(-)
diff --git a/mlir/include/mlir/Dialect/Utils/IndexingUtils.h b/mlir/include/mlir/Dialect/Utils/IndexingUtils.h
index b774359552aa5..6428409889179 100644
--- a/mlir/include/mlir/Dialect/Utils/IndexingUtils.h
+++ b/mlir/include/mlir/Dialect/Utils/IndexingUtils.h
@@ -243,6 +243,14 @@ SmallVector<int64_t>
computePermutationVector(int64_t permSize, ArrayRef<int64_t> positions,
ArrayRef<int64_t> desiredPositions);
+/// Returns the permutation vector obtained by dropping from `inputPerm` every
+/// entry whose value appears in `removePositions` and renumbering the rest.
+///
+/// For example, inputPerm = {2, 4, 0, 1, 3} and removePositions = {1, 2} would
+/// result in a {2, 0, 1} permutation vector.
+SmallVector<int64_t> removePermutation(ArrayRef<int64_t> inputPerm,
+ ArrayRef<int64_t> removePositions);
+
/// Helper to return a subset of `arrayAttr` as a vector of int64_t.
// TODO: Port everything relevant to DenseArrayAttr and drop this util.
SmallVector<int64_t> getI64SubArray(ArrayAttr arrayAttr, unsigned dropFront = 0,
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index 57d126603ebd7..9e0ac5354139f 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -1890,9 +1890,68 @@ struct FoldTransposeWithTranspose : OpRewritePattern<linalg::TransposeOp> {
}
};
+/// This pattern reduces the cost of a transpose by swapping it with its
+/// producing broadcast, so the transpose runs on the smaller tensor:
+/// transpose(broadcast(input)) -> broadcast(transpose(input))
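+///
+/// For example, with the shapes in the tests below, a broadcast with
+/// dimensions = [0, 2, 5] followed by a transpose with
+/// permutation = [0, 2, 1, 5, 4, 3] becomes a transpose with
+/// permutation = [0, 2, 1] followed by a broadcast with
+/// dimensions = [0, 1, 3].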
+struct SwapTransposeWithBroadcast : OpRewritePattern<linalg::TransposeOp> {
+ using OpRewritePattern<linalg::TransposeOp>::OpRewritePattern;
+
+ LogicalResult matchAndRewrite(linalg::TransposeOp transposeOp,
+ PatternRewriter &rewriter) const override {
+ Value input = transposeOp.getInput();
+ BroadcastOp broadcastOp = input.getDefiningOp<BroadcastOp>();
+ if (!broadcastOp || !input.hasOneUse())
+ return failure();
+
+ ArrayRef<int64_t> dimensions = broadcastOp.getDimensions();
+ ArrayRef<int64_t> perms = transposeOp.getPermutation();
+
+ // Compute the permutation for the new (smaller) transpose and the
+ // dimension positions for the trailing broadcast.
+ SmallVector<int64_t> resultPerms = removePermutation(perms, dimensions);
+ SmallVector<int64_t> resultDimensions;
+ SmallVector<int64_t> invertPerm = invertPermutationVector(perms);
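+ // A broadcast dimension `dim` appears at position `invertPerm[dim]` after
+ // the original transpose; that is where the new broadcast must
+ // materialize it.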
+ for (int64_t dim : dimensions)
+ resultDimensions.push_back(invertPerm[dim]);
+ llvm::sort(resultDimensions);
+
+ // Build the init tensor for the new transpose from the shape of the
+ // pre-broadcast operand.
+ Value broadcastInput = broadcastOp.getInput();
+ Location loc = transposeOp.getLoc();
+ MLIRContext *ctx = transposeOp.getContext();
+ SmallVector<OpFoldResult> dims;
+ auto broadcastInputTy =
+ mlir::cast<RankedTensorType>(broadcastInput.getType());
+ for (int64_t i = 0; i < broadcastInputTy.getRank(); i++) {
+ if (broadcastInputTy.isDynamicDim(i)) {
+ dims.push_back(rewriter.create<tensor::DimOp>(loc, broadcastInput, i)
+ ->getResult(0));
+ } else {
+ dims.push_back(IntegerAttr::get(IndexType::get(ctx),
+ broadcastInputTy.getDimSize(i)));
+ }
+ }
+ SmallVector<OpFoldResult> transposeResultShapes =
+ applyPermutation(dims, resultPerms);
+ Value transposeInit = rewriter.create<tensor::EmptyOp>(
+ transposeOp.getLoc(), transposeResultShapes,
+ broadcastInputTy.getElementType());
+
+ // Create broadcast(transpose(input)).
+ Value transposeResult =
+ rewriter
+ .create<TransposeOp>(loc, broadcastOp.getInput(), transposeInit,
+ resultPerms)
+ ->getResult(0);
+ rewriter.replaceOpWithNewOp<BroadcastOp>(
+ transposeOp, transposeResult, transposeOp.getInit(), resultDimensions);
+ return success();
+ }
+};
+
void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
- results.add<FoldTransposeWithTranspose>(context);
+ results.add<FoldTransposeWithTranspose, SwapTransposeWithBroadcast>(context);
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Utils/IndexingUtils.cpp b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
index aba225be720c3..d1822a3f1f95f 100644
--- a/mlir/lib/Dialect/Utils/IndexingUtils.cpp
+++ b/mlir/lib/Dialect/Utils/IndexingUtils.cpp
@@ -252,6 +252,32 @@ mlir::computePermutationVector(int64_t permSize, ArrayRef<int64_t> positions,
return res;
}
+SmallVector<int64_t>
+mlir::removePermutation(ArrayRef<int64_t> inputPerm,
+ ArrayRef<int64_t> removePositions) {
+ assert(inputPerm.size() >= removePositions.size() &&
+ "expected inputPerm to have at least as many entries as removePositions");
+ SmallVector<int64_t> res;
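+ // An entry of inputPerm survives if its value is not in removePositions;
+ // subtract the number of removed values smaller than it so the survivors
+ // renumber into a valid permutation.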
+ for (unsigned inputIndex = 0; inputIndex < inputPerm.size(); inputIndex++) {
+ int64_t targetIndex = inputPerm[inputIndex];
+ bool shouldRemove = false;
+ for (unsigned removeIndex = 0; removeIndex < removePositions.size();
+ removeIndex++) {
+ if (removePositions[removeIndex] == inputPerm[inputIndex]) {
+ shouldRemove = true;
+ break;
+ }
+ if (removePositions[removeIndex] < inputPerm[inputIndex]) {
+ targetIndex--;
+ }
+ }
+ if (!shouldRemove) {
+ res.push_back(targetIndex);
+ }
+ }
+ return res;
+}
+
SmallVector<int64_t> mlir::getI64SubArray(ArrayAttr arrayAttr,
unsigned dropFront,
unsigned dropBack) {
diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir
index 928030a81dc02..30a8d76fc73ac 100644
--- a/mlir/test/Dialect/Linalg/canonicalize.mlir
+++ b/mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -1017,7 +1017,7 @@ func.func @broadcast_same_shape(%input: tensor<2x3xf32>, %init: tensor<2x3xf32>)
return %0 : tensor<2x3xf32>
}
-// ----
+// -----
func.func @transpose_1d(%input: tensor<16xf32>,
%init: tensor<16xf32>) -> tensor<16xf32> {
@@ -1096,3 +1096,54 @@ func.func @transpose_transpose_fold(%input: tensor<5x4x3xf32>,
func.return %transpose2 : tensor<3x4x5xf32>
}
+// -----
+
+func.func @broadcast_transpose_fold(%input: tensor<2x4x5xf32>,
+ %init1: tensor<1x2x3x4x5x6xf32>,
+ %init2: tensor<1x3x2x6x5x4xf32>) -> tensor<1x3x2x6x5x4xf32> {
+ // CHECK-LABEL: @broadcast_transpose_fold
+ // CHECK-SAME: %[[INPUT:[a-zA-Z0-9]+]]: tensor<2x4x5xf32>
+ // CHECK-SAME: %[[INIT1:[a-zA-Z0-9]+]]: tensor<1x2x3x4x5x6xf32>
+ // CHECK-SAME: %[[INIT2:[a-zA-Z0-9]+]]: tensor<1x3x2x6x5x4xf32>
+ // CHECK: %[[TMP_INIT:.+]] = tensor.empty() : tensor<2x5x4xf32>
+ // CHECK: %[[TRANSPOSE:.+]] = linalg.transpose ins(%[[INPUT]] : tensor<2x4x5xf32>) outs(%[[TMP_INIT]] : tensor<2x5x4xf32>) permutation = [0, 2, 1]
+ // CHECK: %[[BROADCAST:.+]] = linalg.broadcast ins(%[[TRANSPOSE]] : tensor<2x5x4xf32>) outs(%[[INIT2]] : tensor<1x3x2x6x5x4xf32>) dimensions = [0, 1, 3]
+ // CHECK: return %[[BROADCAST]] : tensor<1x3x2x6x5x4xf32>
+ %broadcast = linalg.broadcast
+ ins(%input : tensor<2x4x5xf32>)
+ outs(%init1 : tensor<1x2x3x4x5x6xf32>)
+ dimensions = [0, 2, 5]
+ %transpose = linalg.transpose
+ ins(%broadcast : tensor<1x2x3x4x5x6xf32>)
+ outs(%init2 : tensor<1x3x2x6x5x4xf32>)
+ permutation = [0, 2, 1, 5, 4, 3]
+ func.return %transpose : tensor<1x3x2x6x5x4xf32>
+}
+
+// -----
+
+func.func @broadcast_transpose_fold_dynamic(%input: tensor<?x?x5xf32>,
+ %init1: tensor<1x?x3x?x5x6xf32>,
+ %init2: tensor<1x3x?x6x5x?xf32>) -> tensor<1x3x?x6x5x?xf32> {
+ // CHECK-LABEL: @broadcast_transpose_fold_dynamic
+ // CHECK-SAME: %[[INPUT:[a-zA-Z0-9]+]]: tensor<?x?x5xf32>
+ // CHECK-SAME: %[[INIT1:[a-zA-Z0-9]+]]: tensor<1x?x3x?x5x6xf32>
+ // CHECK-SAME: %[[INIT2:[a-zA-Z0-9]+]]: tensor<1x3x?x6x5x?xf32>
+ // CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
+ // CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
+ // CHECK: %[[DIM0:.+]] = tensor.dim %[[INPUT]], %[[C0]] : tensor<?x?x5xf32>
+ // CHECK: %[[DIM1:.+]] = tensor.dim %[[INPUT]], %[[C1]] : tensor<?x?x5xf32>
+ // CHECK: %[[TMP_INIT:.+]] = tensor.empty(%[[DIM1]], %[[DIM0]]) : tensor<?x5x?xf32>
+ // CHECK: %[[TRANSPOSE:.+]] = linalg.transpose ins(%[[INPUT]] : tensor<?x?x5xf32>) outs(%[[TMP_INIT]] : tensor<?x5x?xf32>) permutation = [1, 2, 0]
+ // CHECK: %[[BROADCAST:.+]] = linalg.broadcast ins(%[[TRANSPOSE]] : tensor<?x5x?xf32>) outs(%[[INIT2]] : tensor<1x3x?x6x5x?xf32>) dimensions = [0, 1, 3]
+ // CHECK: return %[[BROADCAST]] : tensor<1x3x?x6x5x?xf32>
+ %broadcast = linalg.broadcast
+ ins(%input : tensor<?x?x5xf32>)
+ outs(%init1 : tensor<1x?x3x?x5x6xf32>)
+ dimensions = [0, 2, 5]
+ %transpose = linalg.transpose
+ ins(%broadcast : tensor<1x?x3x?x5x6xf32>)
+ outs(%init2 : tensor<1x3x?x6x5x?xf32>)
+ permutation = [0, 2, 3, 5, 4, 1]
+ func.return %transpose : tensor<1x3x?x6x5x?xf32>
+}