[Mlir-commits] [mlir] 1403073 - [mlir][tensor] Fold rank-reducing insert_slice with inverse collapse_shape
Matthias Springer
llvmlistbot at llvm.org
Mon Dec 5 00:24:30 PST 2022
Author: Matthias Springer
Date: 2022-12-05T09:17:29+01:00
New Revision: 140307379075ddd5aa6593d74c89e519baea7238
URL: https://github.com/llvm/llvm-project/commit/140307379075ddd5aa6593d74c89e519baea7238
DIFF: https://github.com/llvm/llvm-project/commit/140307379075ddd5aa6593d74c89e519baea7238.diff
LOG: [mlir][tensor] Fold rank-reducing insert_slice with inverse collapse_shape
Differential Revision: https://reviews.llvm.org/D139221
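Illustration (editorial, not part of the commit message): the new pattern rewrites a tensor.collapse_shape whose result feeds a rank-reducing tensor.insert_slice into a direct insert_slice of the un-collapsed source. Mirroring the test case added below:

  %0 = tensor.collapse_shape %t [[0, 1], [2], [3]]
      : tensor<?x1x1x5xf32> into tensor<?x1x5xf32>
  %1 = tensor.insert_slice %0 into %d[0, 0, 0, 0][%sz, 1, 1, 5][1, 1, 1, 1]
      : tensor<?x1x5xf32> into tensor<?x?x?x?xf32>

is rewritten to

  %1 = tensor.insert_slice %t into %d[0, 0, 0, 0][%sz, 1, 1, 5][1, 1, 1, 1]
      : tensor<?x1x1x5xf32> into tensor<?x?x?x?xf32>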
Added:
Modified:
mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
index c1166c5eb5ec6..b655df3c2cc48 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/ReshapePatterns.cpp
@@ -49,9 +49,41 @@ struct FoldExpandOfRankReducingExtract
     return success();
   }
 };
+
+/// Fold insert_slice(collapse_shape) ops that cancel each other out.
+struct FoldInsertOfRankReducingInsert : public OpRewritePattern<InsertSliceOp> {
+  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
+                                PatternRewriter &rewriter) const override {
+    auto collapseShapeOp =
+        insertSliceOp.getSource().getDefiningOp<CollapseShapeOp>();
+    if (!collapseShapeOp)
+      return failure();
+    RankedTensorType srcType = collapseShapeOp.getSrcType();
+
+    // Only cases where the CollapseShapeOp can be folded away entirely are
+    // supported. Moreover, only simple cases where the resulting InsertSliceOp
+    // has no rank-reduction anymore are supported at the moment.
+    RankedTensorType nonReducingInsertType =
+        RankedTensorType::get(insertSliceOp.getStaticSizes(),
+                              insertSliceOp.getType().getElementType());
+    if (nonReducingInsertType != srcType)
+      return failure();
+
+    SmallVector<OpFoldResult> mixedOffsets = insertSliceOp.getMixedOffsets();
+    SmallVector<OpFoldResult> mixedSizes = insertSliceOp.getMixedSizes();
+    SmallVector<OpFoldResult> mixedStrides = insertSliceOp.getMixedStrides();
+    rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
+        insertSliceOp, collapseShapeOp.getSrc(), insertSliceOp.getDest(),
+        mixedOffsets, mixedSizes, mixedStrides);
+    return success();
+  }
+};
 } // namespace

 void mlir::tensor::populateReassociativeReshapeFoldingPatterns(
     RewritePatternSet &patterns) {
-  patterns.add<FoldExpandOfRankReducingExtract>(patterns.getContext());
+  patterns.add<FoldExpandOfRankReducingExtract, FoldInsertOfRankReducingInsert>(
+      patterns.getContext());
 }
diff --git a/mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir b/mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir
index c81e531507a28..15a00a58c0f5a 100644
--- a/mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir
+++ b/mlir/test/Dialect/Tensor/fold-reassociative-reshapes.mlir
@@ -17,3 +17,19 @@ func.func @expand_shape_of_rank_reducing_extract(
       : tensor<?x1x5xf32> into tensor<?x1x1x5xf32>
   return %1, %2 : tensor<?x1x1x5xf32>, tensor<?x1x1x5xf32>
 }
+
+// -----
+
+// CHECK-LABEL: func @rank_reducing_insert_of_collapse_shape(
+// CHECK-SAME:     %[[t:.*]]: tensor<?x1x1x5xf32>
+// CHECK:   %[[insert:.*]] = tensor.insert_slice %[[t]] into %{{.*}}[0, 0, 0, 0] [%{{.*}}, 1, 1, 5] [1, 1, 1, 1] : tensor<?x1x1x5xf32> into tensor<?x?x?x?xf32>
+// CHECK:   return %[[insert]]
+func.func @rank_reducing_insert_of_collapse_shape(
+    %t: tensor<?x1x1x5xf32>, %d: tensor<?x?x?x?xf32>, %sz: index)
+  -> tensor<?x?x?x?xf32> {
+  %0 = tensor.collapse_shape %t [[0, 1], [2], [3]]
+      : tensor<?x1x1x5xf32> into tensor<?x1x5xf32>
+  %1 = tensor.insert_slice %0 into %d[0, 0, 0, 0][%sz, 1, 1, 5][1, 1, 1, 1]
+      : tensor<?x1x5xf32> into tensor<?x?x?x?xf32>
+  return %1 : tensor<?x?x?x?xf32>
+}
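Editorial note, not part of the patch: a minimal sketch of how these folding patterns are typically driven, assuming the standard greedy rewrite driver; the helper name is hypothetical and the header paths may differ between revisions.

// Sketch only: run the reassociative reshape folding patterns on an op.
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"  // populate* declaration (path assumed)
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

static mlir::LogicalResult foldReassociativeReshapes(mlir::Operation *root) {
  mlir::RewritePatternSet patterns(root->getContext());
  mlir::tensor::populateReassociativeReshapeFoldingPatterns(patterns);
  // Greedily applies FoldExpandOfRankReducingExtract and the new
  // FoldInsertOfRankReducingInsert until no more rewrites trigger.
  return mlir::applyPatternsAndFoldGreedily(root, std::move(patterns));
}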