[Mlir-commits] [mlir] [mlir][tensor] Enhance SimplifyPackToExpandShape for unit dim cases. (PR #79247)
Han-Chung Wang
llvmlistbot at llvm.org
Wed Jan 24 18:40:21 PST 2024
https://github.com/hanhanW updated https://github.com/llvm/llvm-project/pull/79247
From caa67d56d6859ed5b9feed9c2190e80b74c7237a Mon Sep 17 00:00:00 2001
From: hanhanW <hanhan0912 at gmail.com>
Date: Wed, 24 Jan 2024 11:18:46 +0800
Subject: [PATCH 1/3] [mlir][tensor] Enhance SimplifyPackToExpandShape for unit
dim cases.
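The pattern previously fired only when packing happened on the innermost
dimension; with this change it also folds packs whose source has at most one
non-unit dimension. As a sketch of the new behavior (this mirrors the
@pack_1x32_to_1x16x1x2 test added below and is not additional functionality),
a pack like

  func.func @pack_1x32_to_1x16x1x2(%arg0 : tensor<1x32xf32>) -> tensor<1x16x1x2xf32> {
    %empty = tensor.empty() : tensor<1x16x1x2xf32>
    // Only the size-32 dimension is meaningfully tiled; all other dims are unit dims.
    %pack = tensor.pack %arg0 inner_dims_pos = [0, 1] inner_tiles = [1, 2] into %empty
        : tensor<1x32xf32> -> tensor<1x16x1x2xf32>
    return %pack : tensor<1x16x1x2xf32>
  }

is simplified to a plain reshape, as the CHECK lines in the new test expect:

  %expanded = tensor.expand_shape %arg0 [[0], [1, 2, 3]]
      : tensor<1x32xf32> into tensor<1x16x1x2xf32>
  return %expanded : tensor<1x16x1x2xf32>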
---
.../Transforms/PackAndUnpackPatterns.cpp | 54 ++++++++++++++++---
.../Dialect/Tensor/simplify-pack-unpack.mlir | 51 ++++++++++++++++++
2 files changed, 97 insertions(+), 8 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
index 06be017f24b823e..354fe0f0b20a608 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
@@ -11,6 +11,7 @@
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/IR/PatternMatch.h"
+#include "mlir/Support/LogicalResult.h"
#include "llvm/Support/Debug.h"
namespace mlir {
@@ -22,6 +23,12 @@ static bool areAllConstantIntValue(ArrayRef<OpFoldResult> ofrs, int64_t value) {
ofrs, [&](OpFoldResult ofr) { return isConstantIntValue(ofr, value); });
}
+/// Returns the number of shape sizes that are either dynamic or greater than 1.
+static int64_t getNumGtOneDims(ArrayRef<int64_t> shape) {
+ return llvm::count_if(
+ shape, [](int64_t v) { return ShapedType::isDynamic(v) || v > 1; });
+}
+
/// Packing one-dimensional tensor can be expressed as an expand shape op.
struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
using OpRewritePattern<PackOp>::OpRewritePattern;
@@ -34,11 +41,9 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
reassociation);
}
- LogicalResult matchAndRewrite(PackOp packOp,
- PatternRewriter &rewriter) const override {
- if (packOp.getPaddingValue())
- return rewriter.notifyMatchFailure(packOp, "expects no padding value");
-
+ /// Returns success() if it is only packing on the innermost dimension.
+ LogicalResult isPackOneInnerMostDim(RewriterBase &rewriter,
+ PackOp packOp) const {
auto outerDimsPerm = packOp.getOuterDimsPerm();
if (!outerDimsPerm.empty() && !isIdentityPermutation(outerDimsPerm)) {
return rewriter.notifyMatchFailure(
@@ -46,14 +51,47 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
"expects outer_dims_perm is empty or an identity permutation");
}
- RankedTensorType sourceType = packOp.getSourceType();
- RankedTensorType destType = packOp.getDestType();
+ int64_t srcRank = packOp.getSourceRank();
ArrayRef<int64_t> dimsPos = packOp.getInnerDimsPos();
- if (dimsPos.size() != 1 || (dimsPos[0] + 1 != sourceType.getRank())) {
+ if (dimsPos.size() != 1 || (dimsPos[0] + 1 != srcRank)) {
return rewriter.notifyMatchFailure(
packOp, "expects packing at the innermost dimension");
}
+ return success();
+ }
+
+ /// Returns success() if the source has at most one non-unit dimension and
+ /// packing only happens on that dimension. It assumes that the pack op does
+ /// not have a padding value.
+ LogicalResult isPack1DSrc(RewriterBase &rewriter, PackOp packOp) const {
+ ArrayRef<int64_t> srcShape = packOp.getSourceType().getShape();
+ if (getNumGtOneDims(srcShape) > 1) {
+ return rewriter.notifyMatchFailure(
+ packOp, "expects source is not 1D tensor with unit dims");
+ }
+ // The pack op does not have padding value. Non-unit inner tile size must be
+ // used by the non-unit dimension.
+ SmallVector<int64_t> innerTiles = packOp.getStaticTiles();
+ if (getNumGtOneDims(innerTiles) > 1) {
+ return rewriter.notifyMatchFailure(
+ packOp, "expects has at most one non-unit inner tiles");
+ }
+
+ return success();
+ }
+
+ LogicalResult matchAndRewrite(PackOp packOp,
+ PatternRewriter &rewriter) const override {
+ if (packOp.getPaddingValue())
+ return rewriter.notifyMatchFailure(packOp, "expects no padding value");
+
+ if (failed(isPackOneInnerMostDim(rewriter, packOp)) &&
+ failed(isPack1DSrc(rewriter, packOp)))
+ return failure();
+
+ RankedTensorType sourceType = packOp.getSourceType();
+ RankedTensorType destType = packOp.getDestType();
auto reassociation =
getReassociationIndicesForReshape(sourceType, destType);
if (!reassociation)
diff --git a/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir b/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
index 82bfe6fe8689ab5..00bcacb54d01210 100644
--- a/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
@@ -83,6 +83,57 @@ func.func @single_first_inner_dim_packing(%arg0: tensor<256x5xf32>) -> tensor<8x
// -----
+// CHECK-LABEL: func.func @pack_1d_to_1d
+// CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0], [1, 2, 3]]
+// CHECK: return %[[EXPANDED]]
+func.func @pack_1d_to_1d(%arg0 : tensor<1x32xf32>) -> tensor<1x32x1x1xf32> {
+ %empty = tensor.empty() : tensor<1x32x1x1xf32>
+ %pack = tensor.pack %arg0 inner_dims_pos = [0, 1] inner_tiles = [1, 1] into %empty
+ : tensor<1x32xf32> -> tensor<1x32x1x1xf32>
+ return %pack : tensor<1x32x1x1xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @pack_1x32_to_1x16x1x2
+// CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0], [1, 2, 3]]
+// CHECK: return %[[EXPANDED]]
+func.func @pack_1x32_to_1x16x1x2(%arg0 : tensor<1x32xf32>) -> tensor<1x16x1x2xf32> {
+ %empty = tensor.empty() : tensor<1x16x1x2xf32>
+ %pack = tensor.pack %arg0 inner_dims_pos = [0, 1] inner_tiles = [1, 2] into %empty
+ : tensor<1x32xf32> -> tensor<1x16x1x2xf32>
+ return %pack : tensor<1x16x1x2xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @pack_32x1_to_16x1x2x1
+// CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]
+// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1, 2], [3]]
+// CHECK: return %[[EXPANDED]]
+func.func @pack_32x1_to_16x1x2x1(%arg0 : tensor<32x1xf32>) -> tensor<1x16x2x1xf32> {
+ %empty = tensor.empty() : tensor<1x16x2x1xf32>
+ %pack = tensor.pack %arg0 outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [2, 1] into %empty
+ : tensor<32x1xf32> -> tensor<1x16x2x1xf32>
+ return %pack : tensor<1x16x2x1xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func.func @pack_32x1_to_16x1x1x2
+// CHECK-NOT: tensor.expand_shape
+// CHECK: tensor.pack
+func.func @pack_32x1_to_16x1x1x2(%arg0 : tensor<32x1xf32>) -> tensor<16x1x1x2xf32> {
+ %empty = tensor.empty() : tensor<16x1x1x2xf32>
+ %pack = tensor.pack %arg0 inner_dims_pos = [1, 0] inner_tiles = [1, 2] into %empty
+ : tensor<32x1xf32> -> tensor<16x1x1x2xf32>
+ return %pack : tensor<16x1x1x2xf32>
+}
+
+// -----
+
// CHECK-LABEL: func.func @unpack_1d_to_collapse
// CHECK-SAME: %[[ARG0:.+]]: tensor<8x32xf32>)
// CHECK: %[[COLLAPSED:.+]] = tensor.collapse_shape %[[ARG0]] {{\[}}[0, 1]] : tensor<8x32xf32> into tensor<256xf32>
From 1bc4a2ce7d4476ffb70d5fb8d2a08289b91b958a Mon Sep 17 00:00:00 2001
From: hanhanW <hanhan0912 at gmail.com>
Date: Thu, 25 Jan 2024 00:10:40 +0800
Subject: [PATCH 2/3] address comments
---
.../Tensor/Transforms/PackAndUnpackPatterns.cpp | 15 +++++++++------
.../test/Dialect/Tensor/simplify-pack-unpack.mlir | 4 ++--
2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
index 354fe0f0b20a608..02536a40f36d7a9 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
@@ -42,8 +42,8 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
}
/// Returns success() if it is only packing on the innermost dimension.
- LogicalResult isPackOneInnerMostDim(RewriterBase &rewriter,
- PackOp packOp) const {
+ LogicalResult isPackOnInnerMostDim(RewriterBase &rewriter,
+ PackOp packOp) const {
auto outerDimsPerm = packOp.getOuterDimsPerm();
if (!outerDimsPerm.empty() && !isIdentityPermutation(outerDimsPerm)) {
return rewriter.notifyMatchFailure(
@@ -64,10 +64,12 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
/// packing only happens on that dimension. It assumes that the pack op does
/// not have a padding value.
LogicalResult isPack1DSrc(RewriterBase &rewriter, PackOp packOp) const {
+ assert(!packOp.getPaddingValue() &&
+ "expect the op does not have padding value.");
ArrayRef<int64_t> srcShape = packOp.getSourceType().getShape();
if (getNumGtOneDims(srcShape) > 1) {
return rewriter.notifyMatchFailure(
- packOp, "expects source is not 1D tensor with unit dims");
+ packOp, "expects source to have at most one non-unit dims");
}
// The pack op does not have padding value. Non-unit inner tile size must be
@@ -75,7 +77,7 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
SmallVector<int64_t> innerTiles = packOp.getStaticTiles();
if (getNumGtOneDims(innerTiles) > 1) {
return rewriter.notifyMatchFailure(
- packOp, "expects has at most one non-unit inner tiles");
+ packOp, "expects at most one non-unit inner tiles");
}
return success();
@@ -86,9 +88,10 @@ struct SimplifyPackToExpandShape : public OpRewritePattern<PackOp> {
if (packOp.getPaddingValue())
return rewriter.notifyMatchFailure(packOp, "expects no padding value");
- if (failed(isPackOneInnerMostDim(rewriter, packOp)) &&
- failed(isPack1DSrc(rewriter, packOp)))
+ if (failed(isPackOnInnerMostDim(rewriter, packOp)) &&
+ failed(isPack1DSrc(rewriter, packOp))) {
return failure();
+ }
RankedTensorType sourceType = packOp.getSourceType();
RankedTensorType destType = packOp.getDestType();
diff --git a/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir b/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
index 00bcacb54d01210..859eb5ee2a7061d 100644
--- a/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
+++ b/mlir/test/Dialect/Tensor/simplify-pack-unpack.mlir
@@ -83,11 +83,11 @@ func.func @single_first_inner_dim_packing(%arg0: tensor<256x5xf32>) -> tensor<8x
// -----
-// CHECK-LABEL: func.func @pack_1d_to_1d
+// CHECK-LABEL: func.func @pack_1x32_to_1x32x1x1
// CHECK-SAME: %[[ARG0:[0-9a-zA-Z]+]]
// CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0], [1, 2, 3]]
// CHECK: return %[[EXPANDED]]
-func.func @pack_1d_to_1d(%arg0 : tensor<1x32xf32>) -> tensor<1x32x1x1xf32> {
+func.func @pack_1x32_to_1x32x1x1(%arg0 : tensor<1x32xf32>) -> tensor<1x32x1x1xf32> {
%empty = tensor.empty() : tensor<1x32x1x1xf32>
%pack = tensor.pack %arg0 inner_dims_pos = [0, 1] inner_tiles = [1, 1] into %empty
: tensor<1x32xf32> -> tensor<1x32x1x1xf32>
From cc39ef590a6409c9288c5139c8cb3e80a02c7f2d Mon Sep 17 00:00:00 2001
From: hanhanW <hanhan0912 at gmail.com>
Date: Thu, 25 Jan 2024 10:40:01 +0800
Subject: [PATCH 3/3] remove includes
---
mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp | 1 -
1 file changed, 1 deletion(-)
diff --git a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
index 02536a40f36d7a9..b404543ddef867e 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/PackAndUnpackPatterns.cpp
@@ -11,7 +11,6 @@
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/IR/PatternMatch.h"
-#include "mlir/Support/LogicalResult.h"
#include "llvm/Support/Debug.h"
namespace mlir {
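For reference, these rewrites are opt-in: a pass collects them through the
existing tensor::populateSimplifyPackAndUnpackPatterns entry point and runs
them with the greedy rewrite driver. A minimal C++ sketch, assuming only those
existing entry points (the simplifyPackUnpack wrapper name is invented here
for illustration):

  #include "mlir/Dialect/Tensor/Transforms/Transforms.h"
  #include "mlir/IR/PatternMatch.h"
  #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

  // Illustrative wrapper, not part of the patch: rewrites tensor.pack/unpack
  // ops under `root` (e.g. a func.func) until no more patterns apply.
  static mlir::LogicalResult simplifyPackUnpack(mlir::Operation *root) {
    mlir::RewritePatternSet patterns(root->getContext());
    // Adds SimplifyPackToExpandShape (and the unpack counterpart) to the set.
    mlir::tensor::populateSimplifyPackAndUnpackPatterns(patterns);
    return mlir::applyPatternsAndFoldGreedily(root, std::move(patterns));
  }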