[Mlir-commits] [mlir] be630f0 - [mlir][bufferize] Implement BufferizableOpInterface for tensor.empty
Matthias Springer
llvmlistbot at llvm.org
Mon Dec 12 05:26:10 PST 2022
Author: Matthias Springer
Date: 2022-12-12T14:19:38+01:00
New Revision: be630f07de0c17bade67e6ab6a297db41003775d
URL: https://github.com/llvm/llvm-project/commit/be630f07de0c17bade67e6ab6a297db41003775d
DIFF: https://github.com/llvm/llvm-project/commit/be630f07de0c17bade67e6ab6a297db41003775d.diff
LOG: [mlir][bufferize] Implement BufferizableOpInterface for tensor.empty
The op is not bufferizable but should be analyzable (for `EliminateEmptyTensors`, which uses the bufferization infrastructure).
Also improve debugging functionality and error messages.
Also adds a missing pass to the sparse pipeline. (tensor.empty should be replaced with bufferization.alloc_tensor, but it sometimes used to work without, depending on how the tensor.empty is used. Now we always fail explicitly.)
Added:
Modified:
mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp
mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
mlir/test/Dialect/Tensor/bufferize.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
index bf745e24cef49..034ff5b3db2d8 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
@@ -804,6 +804,23 @@ static bool wouldCreateReadAfterWriteInterference(
aliasInfo);
}
+/// Annotate IR with details about the detected non-writability conflict.
+static void annotateNonWritableTensor(Value value) {
+ static int64_t counter = 0;
+ OpBuilder b(value.getContext());
+ std::string id = "W_" + std::to_string(counter++);
+ if (auto opResult = value.dyn_cast<OpResult>()) {
+ std::string attr = id + "[NOT-WRITABLE: result " +
+ std::to_string(opResult.getResultNumber()) + "]";
+ opResult.getDefiningOp()->setAttr(attr, b.getUnitAttr());
+ } else {
+ auto bbArg = value.cast<BlockArgument>();
+ std::string attr = id + "[NOT-WRITABLE: bbArg " +
+ std::to_string(bbArg.getArgNumber()) + "]";
+ bbArg.getOwner()->getParentOp()->setAttr(attr, b.getUnitAttr());
+ }
+}
+
/// Check the reverse SSA use-def chain (following aliasing OpOperands) for
/// non-writable tensor values. Stop searching when an out-of-place bufferized
/// OpOperand was found (or when the OpOperand was not bufferized yet).
@@ -817,8 +834,11 @@ hasPrecedingAliasingNonWritableTensor(Value value, OpOperand *currentOpOperand,
worklist.push_back(value);
while (!worklist.empty()) {
Value nextVal = worklist.pop_back_val();
- if (!state.isWritable(nextVal))
+ if (!state.isWritable(nextVal)) {
+ if (state.getOptions().printConflicts)
+ annotateNonWritableTensor(nextVal);
return true;
+ }
// If `nextVal` is not a BlockArgument: End of use-def chain reached.
auto opResult = nextVal.dyn_cast<OpResult>();
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp
index dd8dec9859568..8d01a322222a7 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparsificationAndBufferizationPass.cpp
@@ -13,6 +13,7 @@
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotModuleBufferize.h"
+#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Transforms.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
@@ -102,6 +103,8 @@ class SparsificationAndBufferizationPass
// Run enabling transformations.
OpPassManager pm("builtin.module");
pm.addPass(createPreSparsificationRewritePass());
+ pm.addNestedPass<func::FuncOp>(
+ bufferization::createEmptyTensorToAllocTensorPass());
if (failed(runPipeline(pm, getOperation())))
return signalPassFailure();
}
diff --git a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
index 528d83f76e050..aa5a1d8716f72 100644
--- a/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/Transforms/BufferizableOpInterfaceImpl.cpp
@@ -230,6 +230,24 @@ struct DimOpInterface
}
};
+/// Bufferization of tensor.empty. This op does not bufferize, but we need an
+/// interface implementation, so that the result of this op is considered
+/// "writable" (default impl. of `isWritable`). Results of ops that do not
+/// implement `BufferizableOpInterface` are not writable.
+struct EmptyOpInterface
+ : public BufferizableOpInterface::ExternalModel<EmptyOpInterface,
+ tensor::EmptyOp> {
+ LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
+ const BufferizationOptions &options) const {
+ // tensor.empty ops are used to indicate the shape of a tensor. They have
+ // no defined contents and cannot be bufferized. However, they can be
+ // converted to bufferization.alloc_tensor ops, which then bufferize to an
+ // allocation (--empty-tensor-to-alloc-tensor).
+ return op->emitOpError("cannot be bufferized, but can be converted to "
+ "bufferization.alloc_tensor");
+ }
+};
+
/// Bufferization of tensor.expand_shape. Replace with memref.expand_shape.
struct ExpandShapeOpInterface
: public BufferizableOpInterface::ExternalModel<ExpandShapeOpInterface,
@@ -1060,6 +1078,7 @@ void mlir::tensor::registerBufferizableOpInterfaceExternalModels(
CastOp::attachInterface<CastOpInterface>(*ctx);
CollapseShapeOp::attachInterface<CollapseShapeOpInterface>(*ctx);
DimOp::attachInterface<DimOpInterface>(*ctx);
+ EmptyOp::attachInterface<EmptyOpInterface>(*ctx);
ExpandShapeOp::attachInterface<ExpandShapeOpInterface>(*ctx);
ExtractSliceOp::attachInterface<ExtractSliceOpInterface>(*ctx);
ExtractOp::attachInterface<ExtractOpInterface>(*ctx);
diff --git a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
index e0c3f08df9ce1..f6355192d0dca 100755
--- a/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_sddmm.mlir
@@ -22,12 +22,12 @@
// CHECK-LABEL: func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
// CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<1024x1024xf64>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<1024x1024xf64>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false]} : tensor<1024x1024xf64>
// CHECK: return %[[VAL_1]] : tensor<1024x1024xf64>
// CHECK: }
func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
%cst = arith.constant 0.000000e+00 : f64
- %0 = tensor.empty() : tensor<1024x1024xf64>
+ %0 = bufferization.alloc_tensor() : tensor<1024x1024xf64>
%1 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> ()>,
affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
@@ -41,12 +41,12 @@ func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
// CHECK-LABEL: func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<32xf64>
-// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false], memory_space = 0 : i64} : tensor<32xf64>
+// CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) {bufferization.escape = [false]} : tensor<32xf64>
// CHECK: return %[[VAL_1]] : tensor<32xf64>
// CHECK: }
func.func @fold_yield_direct_zero() -> tensor<32xf64> {
%cst = arith.constant 0.000000e+00 : f64
- %0 = tensor.empty() : tensor<32xf64>
+ %0 = bufferization.alloc_tensor() : tensor<32xf64>
%1 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
outs(%0 : tensor<32xf64>) {
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
index 49e29d0fec668..4948b0dccf976 100644
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -tensor-bufferize -cse -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -tensor-bufferize -cse -split-input-file -verify-diagnostics | FileCheck %s
// CHECK-LABEL: func @dim(
// CHECK-SAME: %[[TENSOR:.*]]: tensor<f32>,
@@ -61,6 +61,14 @@ func.func @tensor.cast_to_unranked(%arg0: tensor<2xf32>) -> tensor<*xf32> {
return %0 : tensor<*xf32>
}
+// -----
+func.func @tensor.empty() -> tensor<5xf32> {
+  // expected-error @+2 {{failed to bufferize op}}
+  // expected-error @+1 {{cannot be bufferized, but can be converted to bufferization.alloc_tensor}}
+ %0 = tensor.empty() : tensor<5xf32>
+ return %0 : tensor<5xf32>
+}
+
// -----
// CHECK-LABEL: func @tensor.extract(
More information about the Mlir-commits
mailing list