[Mlir-commits] [mlir] [mlir] Do not bufferize parallel_insert_slice dest to read for full slices (PR #112761)
llvmlistbot at llvm.org
Thu Oct 17 11:56:29 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-mlir
Author: None (Max191)
Changes
In the insert_slice bufferization interface implementation, the destination tensor is not considered read if the full tensor is overwritten by the slice. This PR adds the same check for tensor.parallel_insert_slice.
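To make the condition concrete, here is a minimal MLIR sketch (the value names and shapes are illustrative, not from the patch). The destination is only considered unread when all offsets are zero, all strides are one, and the static sizes match the destination type exactly:

```mlir
// Full overwrite: every element of %t is replaced, so %t need not be read.
%0 = tensor.insert_slice %a into %t[0] [10] [1]
    : tensor<10xf32> into tensor<10xf32>

// Partial overwrite: the elements of %t outside the slice survive, so %t
// still bufferizes to a memory read.
%1 = tensor.insert_slice %b into %t[2] [5] [1]
    : tensor<5xf32> into tensor<10xf32>
```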
---
Full diff: https://github.com/llvm/llvm-project/pull/112761.diff
2 Files Affected:
- (modified) mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp (+33-27)
- (modified) mlir/test/Dialect/Tensor/one-shot-bufferize.mlir (+15)
``````````diff
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index 87464ccb71720d..def4ee93854a1a 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -19,6 +19,7 @@
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Tensor/Transforms/SubsetInsertionOpInterfaceImpl.h"
 #include "mlir/Dialect/Utils/StaticValueUtils.h"
+#include "mlir/IR/BuiltinTypeInterfaces.h"
 #include "mlir/IR/Dialect.h"
 #include "mlir/IR/Operation.h"
@@ -636,6 +637,34 @@ struct InsertOpInterface
   }
 };
 
+template <typename InsertOpTy>
+static bool insertSliceOpRequiresRead(InsertOpTy insertSliceOp,
+                                      OpOperand &opOperand) {
+  RankedTensorType destType = insertSliceOp.getDestType();
+
+  // The source is always read.
+  if (opOperand == insertSliceOp.getSourceMutable())
+    return true;
+
+  // For the destination, it depends...
+  assert(opOperand == insertSliceOp.getDestMutable() && "expected dest");
+
+  // Dest is not read if it is entirely overwritten. E.g.:
+  // tensor.insert_slice %a into %t[0][10][1] : ... into tensor<10xf32>
+  bool allOffsetsZero =
+      llvm::all_of(insertSliceOp.getMixedOffsets(),
+                   [](OpFoldResult ofr) { return isConstantIntValue(ofr, 0); });
+  bool sizesMatchDestSizes = llvm::all_of(
+      llvm::enumerate(insertSliceOp.getMixedSizes()), [&](const auto &it) {
+        return getConstantIntValue(it.value()) ==
+               destType.getDimSize(it.index());
+      });
+  bool allStridesOne =
+      llvm::all_of(insertSliceOp.getMixedStrides(),
+                   [](OpFoldResult ofr) { return isConstantIntValue(ofr, 1); });
+  return !(allOffsetsZero && sizesMatchDestSizes && allStridesOne);
+}
+
 /// Bufferization of tensor.insert_slice. Replace with a memory copy. Under
 /// certain circumstances, this op can also be a no-op.
 ///
@@ -646,32 +675,8 @@ struct InsertSliceOpInterface
                                                     tensor::InsertSliceOp> {
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
-    auto insertSliceOp = cast<tensor::InsertSliceOp>(op);
-    RankedTensorType destType = insertSliceOp.getDestType();
-
-    // The source is always read.
-    if (opOperand == insertSliceOp.getSourceMutable())
-      return true;
-
-    // For the destination, it depends...
-    assert(opOperand == insertSliceOp.getDestMutable() && "expected dest");
-
-    // Dest is not read if it is entirely overwritten. E.g.:
-    // tensor.insert_slice %a into %t[0][10][1] : ... into tensor<10xf32>
-    bool allOffsetsZero =
-        llvm::all_of(insertSliceOp.getMixedOffsets(), [](OpFoldResult ofr) {
-          return isConstantIntValue(ofr, 0);
-        });
-    bool sizesMatchDestSizes = llvm::all_of(
-        llvm::enumerate(insertSliceOp.getMixedSizes()), [&](const auto &it) {
-          return getConstantIntValue(it.value()) ==
-                 destType.getDimSize(it.index());
-        });
-    bool allStridesOne =
-        llvm::all_of(insertSliceOp.getMixedStrides(), [](OpFoldResult ofr) {
-          return isConstantIntValue(ofr, 1);
-        });
-    return !(allOffsetsZero && sizesMatchDestSizes && allStridesOne);
+    return insertSliceOpRequiresRead(cast<tensor::InsertSliceOp>(op),
+                                     opOperand);
   }
 
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
@@ -931,7 +936,8 @@ struct ParallelInsertSliceOpInterface
 
   bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
-    return true;
+    return insertSliceOpRequiresRead(cast<tensor::ParallelInsertSliceOp>(op),
+                                     opOperand);
   }
 
   bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
index e2169fe1404c82..dc4306b8316ab7 100644
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -213,6 +213,21 @@ func.func @rank_reducing_parallel_insert_slice(%in: tensor<100xf32>, %out: tenso
 
 // -----
 
+// CHECK-LABEL: func.func @parallel_insert_full_slice_in_place
+// CHECK-NOT: memref.alloc()
+func.func @parallel_insert_full_slice_in_place(%2: tensor<2xf32>) -> tensor<2xf32> {
+  %cst = arith.constant 0.000000e+00 : f32
+  %3 = scf.forall (%arg0) in (1) shared_outs(%arg2 = %2) -> (tensor<2xf32>) {
+    %fill = linalg.fill ins(%cst : f32) outs(%arg2 : tensor<2xf32>) -> tensor<2xf32>
+    scf.forall.in_parallel {
+      tensor.parallel_insert_slice %fill into %arg2[0] [2] [1] : tensor<2xf32> into tensor<2xf32>
+    }
+  } {mapping = [#gpu.thread<linear_dim_0>]}
+  return %3 : tensor<2xf32>
+}
+
+// -----
+
 // This test case could bufferize in-place with a better analysis. However, it
 // is simpler to let the canonicalizer fold away the tensor.insert_slice.
``````````
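For contrast with the new test above, here is a hypothetical partial-slice variant (the function name, value names, and shapes are made up for illustration). Because the slice covers only two of the four destination elements, the `shared_outs` operand must still be treated as read, so the full-overwrite exemption added by this PR does not apply:

```mlir
func.func @parallel_insert_partial_slice(%in: tensor<4xf32>) -> tensor<4xf32> {
  %cst = arith.constant 0.000000e+00 : f32
  %empty = tensor.empty() : tensor<2xf32>
  %out = scf.forall (%arg0) in (1) shared_outs(%dest = %in) -> (tensor<4xf32>) {
    %fill = linalg.fill ins(%cst : f32) outs(%empty : tensor<2xf32>) -> tensor<2xf32>
    scf.forall.in_parallel {
      // Sizes [2] do not match the tensor<4xf32> destination, so the
      // untouched elements of %dest must be preserved: %dest is read.
      tensor.parallel_insert_slice %fill into %dest[0] [2] [1]
          : tensor<2xf32> into tensor<4xf32>
    }
  }
  return %out : tensor<4xf32>
}
```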
https://github.com/llvm/llvm-project/pull/112761
More information about the Mlir-commits mailing list