[Mlir-commits] [mlir] 88539c5 - [mlir][bufferize][NFC] Decouple dropping of equivalent return values from bufferization
Matthias Springer
llvmlistbot at llvm.org
Thu Jun 9 09:51:19 PDT 2022
Author: Matthias Springer
Date: 2022-06-09T18:39:05+02:00
New Revision: 88539c5bdb69bcac8f30cdea612d6e7247192ec8
URL: https://github.com/llvm/llvm-project/commit/88539c5bdb69bcac8f30cdea612d6e7247192ec8
DIFF: https://github.com/llvm/llvm-project/commit/88539c5bdb69bcac8f30cdea612d6e7247192ec8.diff
LOG: [mlir][bufferize][NFC] Decouple dropping of equivalent return values from bufferization
This simplifies the bufferization itself and is in preparation for connecting with the sparse compiler.
Differential Revision: https://reviews.llvm.org/D126814
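For orientation, the new usage (reflected in the updated RUN lines below) is to run the separate -drop-equivalent-buffer-results pass after -one-shot-bufferize instead of relying on the removed drop-equivalent-func-results option. A minimal sketch of the effect, modeled on the @return_arg test case in this commit; the layout maps that bufferization normally infers at function boundaries are omitted here for brevity:

```mlir
// Tensor-level input.
func.func @return_arg(%A: tensor<?xf32>) -> tensor<?xf32> {
  return %A : tensor<?xf32>
}

// After -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs":
// the result that is equivalent to %A is now kept by bufferization itself.
func.func @return_arg(%A: memref<?xf32>) -> memref<?xf32> {
  return %A : memref<?xf32>
}

// After the new -drop-equivalent-buffer-results pass: the redundant result is
// removed and callers use their own operand instead.
func.func @return_arg(%A: memref<?xf32>) {
  return
}
```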
Added:
mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
Modified:
mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt
mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
mlir/test/Dialect/SCF/one-shot-bufferize.mlir
mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
index 2663e480f281b..422a101040e15 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h
@@ -26,10 +26,6 @@ struct OneShotBufferizationOptions : public BufferizationOptions {
/// Specifies whether returning newly allocated memrefs should be allowed.
/// Otherwise, a pass failure is triggered.
bool allowReturnAllocs = false;
-
- /// Specifies whether buffer return values that are equivalent to a FuncOp
- /// bbArg should be dropped.
- bool dropEquivalentFuncResults = true;
};
/// The BufferizationAliasInfo class maintains a list of buffer aliases and
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
index 1d2068edc8467..edc84575a4575 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.h
@@ -39,6 +39,13 @@ std::unique_ptr<Pass> createBufferResultsToOutParamsPass();
/// Also update all call sites.
LogicalResult promoteBufferResultsToOutParams(ModuleOp module);
+/// Creates a pass that drops memref function results that are equivalent to a
+/// function argument.
+std::unique_ptr<Pass> createDropEquivalentBufferResultsPass();
+
+/// Drop all memref function results that are equivalent to a function argument.
+LogicalResult dropEquivalentBufferResults(ModuleOp module);
+
/// Creates a pass that finalizes a partial bufferization by removing remaining
/// bufferization.to_tensor and bufferization.to_memref operations.
std::unique_ptr<OperationPass<func::FuncOp>> createFinalizingBufferizePass();
diff --git a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
index b72780c965402..5203c49edb331 100644
--- a/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/Bufferization/Transforms/Passes.td
@@ -154,6 +154,20 @@ def BufferizationBufferize : Pass<"bufferization-bufferize", "func::FuncOp"> {
let constructor = "mlir::bufferization::createBufferizationBufferizePass()";
}
+def DropEquivalentBufferResults : Pass<"drop-equivalent-buffer-results", "ModuleOp"> {
+ let summary = "Remove MemRef return values that are equivalent to a bbArg";
+ let description = [{
+ This pass removes MemRef return values from functions if they are equivalent
+ to a function bbArg. In that case, the return value is redundant and the
+ respective CallOp operand can be used at the call site.
+
+ Note: If a bbArg buffer is not returned directly but a cast of it is
+ returned instead, the buffer is still considered equivalent.
+ }];
+ let constructor = "mlir::bufferization::createDropEquivalentBufferResultsPass()";
+ let dependentDialects = ["memref::MemRefDialect"];
+}
+
def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> {
let summary = "One-Shot Bufferize";
let description = [{
@@ -221,11 +235,6 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> {
and supports only simple cases at the moment. In particular:
* Recursive or circular function call graphs are not supported.
- * When a returned tensor can be proven to be equivalent to a tensor function
- argument, the return value disappears. Instead, the buffer of the tensor
- argument is modified in-place.
- * Returning non-equivalent tensors is forbidden by default and must be
- explicitly activated with `allow-return-allocs`.
* External functions (without bodies) that return a tensor are not
supported.
* Functions with multiple blocks or multiple ReturnOps are not supported.
@@ -255,9 +264,6 @@ def OneShotBufferize : Pass<"one-shot-bufferize", "ModuleOp"> {
`test-analysis-only`.
}];
let options = [
- Option<"dropEquivalentFuncResults", "drop-equivalent-func-results", "bool",
- /*default=*/"true",
- "Drop buffer return values that are equivalent to a FuncOp arg.">,
Option<"allowReturnAllocs", "allow-return-allocs", "bool",
/*default=*/"false",
"Allows returning/yielding new allocations from a block.">,
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
index a2b6ad8029bee..f3a8d5306de79 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
@@ -180,7 +180,6 @@ struct OneShotBufferizePass
if (!options) {
// Make new bufferization options if none were provided when creating the
// pass.
- opt.dropEquivalentFuncResults = dropEquivalentFuncResults;
opt.allowReturnAllocs = allowReturnAllocs;
opt.allowUnknownOps = allowUnknownOps;
opt.alwaysAliasingWithDest = alwaysAliasingWithDest;
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt
index 189b25e98cb7b..a30359eda8391 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/Bufferization/Transforms/CMakeLists.txt
@@ -5,6 +5,7 @@ add_mlir_dialect_library(MLIRBufferizationTransforms
BufferOptimizations.cpp
BufferResultsToOutParams.cpp
BufferUtils.cpp
+ DropEquivalentBufferResults.cpp
FuncBufferizableOpInterfaceImpl.cpp
OneShotAnalysis.cpp
OneShotModuleBufferize.cpp
@@ -26,3 +27,4 @@ add_mlir_dialect_library(MLIRBufferizationTransforms
MLIRTensor
MLIRTransforms
)
+
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
new file mode 100644
index 0000000000000..90123eb67f296
--- /dev/null
+++ b/mlir/lib/Dialect/Bufferization/Transforms/DropEquivalentBufferResults.cpp
@@ -0,0 +1,151 @@
+//===- DropEquivalentBufferResults.cpp - Calling convention conversion ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass drops return values from functions if they are equivalent to one of
+// their arguments. E.g.:
+//
+// ```
+// func.func @foo(%m : memref<?xf32>) -> (memref<?xf32>) {
+// return %m : memref<?xf32>
+// }
+// ```
+//
+// This function is rewritten to:
+//
+// ```
+// func.func @foo(%m : memref<?xf32>) {
+// return
+// }
+// ```
+//
+// All call sites are updated accordingly. If a function returns a cast of a
+// function argument, it is also considered equivalent. A cast is inserted at
+// the call site in that case.
+
+#include "PassDetail.h"
+#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/IR/Operation.h"
+#include "mlir/Pass/Pass.h"
+
+using namespace mlir;
+
+/// Return the unique ReturnOp that terminates `funcOp`.
+/// Return nullptr if there is no such unique ReturnOp.
+static func::ReturnOp getAssumedUniqueReturnOp(func::FuncOp funcOp) {
+ func::ReturnOp returnOp;
+ for (Block &b : funcOp.getBody()) {
+ if (auto candidateOp = dyn_cast<func::ReturnOp>(b.getTerminator())) {
+ if (returnOp)
+ return nullptr;
+ returnOp = candidateOp;
+ }
+ }
+ return returnOp;
+}
+
+/// Return the func::FuncOp called by `callOp`.
+static func::FuncOp getCalledFunction(CallOpInterface callOp) {
+ SymbolRefAttr sym = callOp.getCallableForCallee().dyn_cast<SymbolRefAttr>();
+ if (!sym)
+ return nullptr;
+ return dyn_cast_or_null<func::FuncOp>(
+ SymbolTable::lookupNearestSymbolFrom(callOp, sym));
+}
+
+LogicalResult
+mlir::bufferization::dropEquivalentBufferResults(ModuleOp module) {
+ IRRewriter rewriter(module.getContext());
+
+ for (auto funcOp : module.getOps<func::FuncOp>()) {
+ if (funcOp.isExternal())
+ continue;
+ func::ReturnOp returnOp = getAssumedUniqueReturnOp(funcOp);
+ // TODO: Support functions with multiple blocks.
+ if (!returnOp)
+ continue;
+
+ // Compute erased results.
+ SmallVector<Value> newReturnValues;
+ BitVector erasedResultIndices(funcOp.getFunctionType().getNumResults());
+ DenseMap<int64_t, int64_t> resultToArgs;
+ for (const auto &it : llvm::enumerate(returnOp.operands())) {
+ bool erased = false;
+ for (BlockArgument bbArg : funcOp.getArguments()) {
+ Value val = it.value();
+ while (auto castOp = val.getDefiningOp<memref::CastOp>())
+ val = castOp.source();
+
+ if (val == bbArg) {
+ resultToArgs[it.index()] = bbArg.getArgNumber();
+ erased = true;
+ break;
+ }
+ }
+
+ if (erased) {
+ erasedResultIndices.set(it.index());
+ } else {
+ newReturnValues.push_back(it.value());
+ }
+ }
+
+ // Update function.
+ funcOp.eraseResults(erasedResultIndices);
+ returnOp.operandsMutable().assign(newReturnValues);
+
+ // Update function calls.
+ module.walk([&](func::CallOp callOp) {
+ if (getCalledFunction(callOp) != funcOp)
+ return WalkResult::skip();
+
+ rewriter.setInsertionPoint(callOp);
+ auto newCallOp = rewriter.create<func::CallOp>(callOp.getLoc(), funcOp,
+ callOp.operands());
+ SmallVector<Value> newResults;
+ int64_t nextResult = 0;
+ for (int64_t i = 0; i < callOp.getNumResults(); ++i) {
+ if (!resultToArgs.count(i)) {
+ // This result was not erased.
+ newResults.push_back(newCallOp.getResult(nextResult++));
+ continue;
+ }
+
+ // This result was erased.
+ Value replacement = callOp.getOperand(resultToArgs[i]);
+ Type expectedType = callOp.getResult(i).getType();
+ if (replacement.getType() != expectedType) {
+ // A cast must be inserted at the call site.
+ replacement = rewriter.create<memref::CastOp>(
+ callOp.getLoc(), expectedType, replacement);
+ }
+ newResults.push_back(replacement);
+ }
+ rewriter.replaceOp(callOp, newResults);
+ return WalkResult::advance();
+ });
+ }
+
+ return success();
+}
+
+namespace {
+struct DropEquivalentBufferResultsPass
+ : DropEquivalentBufferResultsBase<DropEquivalentBufferResultsPass> {
+ void runOnOperation() override {
+ if (failed(bufferization::dropEquivalentBufferResults(getOperation())))
+ return signalPassFailure();
+ }
+};
+} // namespace
+
+std::unique_ptr<Pass>
+mlir::bufferization::createDropEquivalentBufferResultsPass() {
+ return std::make_unique<DropEquivalentBufferResultsPass>();
+}
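
To illustrate the cast handling described in the header comment of the new file, here is a hedged sketch (the function names @callee and @caller are made up for illustration): when the returned value is a memref.cast of a bbArg, the result is still dropped and a cast is re-materialized at the call site so the caller's result type is preserved.

```mlir
// Before the pass: the returned value is a cast of the bbArg, so it is still
// considered equivalent.
func.func @callee(%m: memref<5xf32>) -> memref<?xf32> {
  %0 = memref.cast %m : memref<5xf32> to memref<?xf32>
  return %0 : memref<?xf32>
}
func.func @caller(%m: memref<5xf32>) -> memref<?xf32> {
  %r = func.call @callee(%m) : (memref<5xf32>) -> memref<?xf32>
  return %r : memref<?xf32>
}

// After -drop-equivalent-buffer-results: the result is gone and a cast is
// inserted at the call site to keep the caller's types unchanged.
func.func @callee(%m: memref<5xf32>) {
  return
}
func.func @caller(%m: memref<5xf32>) -> memref<?xf32> {
  func.call @callee(%m) : (memref<5xf32>) -> ()
  %r = memref.cast %m : memref<5xf32> to memref<?xf32>
  return %r : memref<?xf32>
}
```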
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
index be91f1fd86e99..23846346d6386 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -223,7 +223,31 @@ struct CallOpInterface
BufferRelation bufferRelation(Operation *op, OpResult opResult,
const AnalysisState &state) const {
- return BufferRelation::Equivalent;
+ func::CallOp callOp = cast<func::CallOp>(op);
+ FuncOp funcOp = getCalledFunction(callOp);
+ assert(funcOp && "expected CallOp to a FuncOp");
+ const FuncAnalysisState &funcState = getFuncAnalysisState(state);
+ if (getFuncOpAnalysisState(state, funcOp) !=
+ FuncOpAnalysisState::Analyzed) {
+ // Function not analyzed yet. The conservative answer is "None".
+ return BufferRelation::None;
+ }
+
+ Optional<int64_t> maybeEquiv =
+ getEquivalentFuncArgIdx(funcOp, funcState, opResult.getResultNumber());
+ if (maybeEquiv.hasValue()) {
+#ifndef NDEBUG
+ SmallVector<OpOperand *> aliasingOpOperands =
+ getAliasingOpOperand(op, opResult, state);
+ assert(aliasingOpOperands.size() == 1 &&
+ "expected exactly 1 aliasing OpOperand");
+ assert(aliasingOpOperands.front()->getOperandNumber() ==
+ maybeEquiv.getValue() &&
+ "inconsistent analysis state");
+#endif
+ return BufferRelation::Equivalent;
+ }
+ return BufferRelation::None;
}
/// All function arguments are writable. It is the responsibility of the
@@ -236,10 +260,6 @@ struct CallOpInterface
FuncOp funcOp = getCalledFunction(callOp);
assert(funcOp && "expected CallOp to a FuncOp");
FunctionType funcType = funcOp.getFunctionType();
- const FuncAnalysisState &funcState =
- getFuncAnalysisState(state.getAnalysisState());
- const OneShotBufferizationOptions &options =
- static_cast<const OneShotBufferizationOptions &>(state.getOptions());
// Result types of the bufferized CallOp.
SmallVector<Type> resultTypes;
@@ -252,22 +272,7 @@ struct CallOpInterface
// Operands of the bufferized CallOp.
SmallVector<Value> newOperands(numOperands, Value());
- // Based on previously gathered equivalence information, we know if a
- // tensor result folds onto an operand. These are the only tensor value
- // results that are supported at the moment.
- //
- // For tensors return values that do not fold onto an operand, additional
- // work is needed (TODO) to either:
- // * hoist a result into an inplaceable operand or
- // * devise a better representation to truly return a buffer.
- //
- // Note: If a function has no body, no equivalence information is
- // available. Consequently, a tensor return value cannot be proven to fold
- // onto a FuncOp bbArg, so calls to such functions are not bufferizable at
- // the moment.
-
- // 1. Compute the result types of the new CallOp. Tensor results that are
- // equivalent to a FuncOp bbArg are no longer returned.
+ // 1. Compute the result types of the new CallOp.
for (const auto &it : llvm::enumerate(callOp.getResultTypes())) {
unsigned returnValIdx = it.index();
Type returnType = it.value();
@@ -278,28 +283,7 @@ struct CallOpInterface
continue;
}
- if (options.dropEquivalentFuncResults) {
- if (Optional<int64_t> bbArgIdx =
- getEquivalentFuncArgIdx(funcOp, funcState, returnValIdx)) {
- // Return operands that are equivalent to some bbArg, are not
- // returned.
- FailureOr<Value> bufferOrFailure =
- state.getBuffer(rewriter, callOp->getOpOperand(*bbArgIdx));
- if (failed(bufferOrFailure))
- return failure();
- replacementValues[returnValIdx] = *bufferOrFailure;
- newOperands[*bbArgIdx] = *bufferOrFailure;
- continue;
- }
- }
-
- if (!options.allowReturnAllocs)
- return callOp->emitError(
- "call to FuncOp that returns non-equivalent tensors not supported");
-
- // Returning a memref. This memref is not equivalent to any bbArg. It is
- // likely a newly allocated buffer. We may want to hoist such allocations
- // to the call site in the future.
+ // Returning a memref.
retValMapping[returnValIdx] = resultTypes.size();
resultTypes.push_back(funcType.getResult(resultTypes.size()));
}
@@ -315,9 +299,7 @@ struct CallOpInterface
continue;
}
- // Retrieve buffers for tensor operands. Tensor operand buffers, who's
- // corresponding FuncOp bbArgs are equivalent to a returned tensor, were
- // already stored in `newOperands` during Step 1.
+ // Retrieve buffers for tensor operands.
Value buffer = newOperands[idx];
if (!buffer) {
FailureOr<Value> bufferOrFailure = state.getBuffer(rewriter, opOperand);
@@ -348,7 +330,7 @@ struct CallOpInterface
Operation *newCallOp = rewriter.create<func::CallOp>(
callOp.getLoc(), funcOp.getSymName(), resultTypes, newOperands);
newCallOp->setAttrs(callOp->getAttrs());
- // Get replacement values for non-tensor / non-equivalent results.
+ // Get replacement values.
for (unsigned i = 0; i < replacementValues.size(); ++i) {
if (replacementValues[i])
continue;
@@ -400,20 +382,12 @@ struct FuncOpInterface
/// function body has been bufferized, function return types can be switched
/// to more concise memref types as part of `foldMemRefCasts`.
///
- /// When a tensor function argument is known to be equivalent to a tensor
- /// result, it is dropped from the return values.
- ///
/// All function bbArgs are writable unless they are explicitly marked as
/// read-only. Callers must insert copies when needed.
- ///
- /// Note: Returning a memref is possible, but corresponding CallOp
- /// bufferizations fail unless `allowReturnAllocs`.
LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
BufferizationState &state) const {
auto funcOp = cast<FuncOp>(op);
FunctionType funcType = funcOp.getFunctionType();
- const FuncAnalysisState &funcState =
- getFuncAnalysisState(state.getAnalysisState());
const OneShotBufferizationOptions &options =
static_cast<const OneShotBufferizationOptions &>(state.getOptions());
@@ -492,24 +466,6 @@ struct FuncOpInterface
continue;
}
- // If return operand is equivalent to some bbArg, no need to return it.
- if (options.dropEquivalentFuncResults) {
- if (Optional<int64_t> equivBbArgIdx = getEquivalentFuncArgIdx(
- funcOp, funcState, returnOperand.getOperandNumber())) {
- // TODO: Use memref type with fully dynamic layout map and add folder
- // for memref.cast + memref.copy.
- Value toMemrefOp = rewriter.create<bufferization::ToMemrefOp>(
- loc, getMemRefType(tensorType, options), returnVal);
- BlockArgument equivBbArg = funcOp.getArgument(*equivBbArgIdx);
- // Note: This copy will fold away. It must be inserted here to ensure
- // that `returnVal` still has at least one use and does not fold away.
- if (failed(
- options.createMemCpy(rewriter, loc, toMemrefOp, equivBbArg)))
- return funcOp->emitError("could not generate copy for bbArg");
- continue;
- }
- }
-
BaseMemRefType resultType;
if (options.functionBoundaryTypeConversion ==
BufferizationOptions::LayoutMapOption::IdentityLayoutMap) {
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
index 243db9651c4f0..233dedb8d20c1 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
@@ -24,19 +24,6 @@
// * `funcOpBbArgReadWriteAnalysis` determines whether or not a tensor bbArg is
// read/written.
//
-// Only tensors that are equivalent to some FuncOp bbArg may be returned.
-// Bufferization currently fails if other tensors (in particular tensors that
-// bufferize out-of-place and result in a new buffer allocation) are returned.
-// In the future, such allocations could be hoisted to the caller.
-//
-// Example: `foo` fails bufferization because %0 is not equivalent to any bbArg.
-// ```
-// func @foo() -> tensor<?xf32> {
-// %0 = bufferization.alloc_tensor(...) : tensor<?xf32>
-// return %0 : tensor<?xf32>
-// }
-// ```
-//
// Module Bufferization implements the following calling convention.
//
// * In the absence of conflicts within a FuncOp, the FuncOp's bbArgs may always
@@ -464,17 +451,6 @@ LogicalResult mlir::bufferization::bufferizeModuleOp(
foldMemRefCasts(funcOp);
}
- // Check result.
- for (func::FuncOp funcOp : orderedFuncOps) {
- if (!options.allowReturnAllocs &&
- llvm::any_of(funcOp.getFunctionType().getResults(), [](Type t) {
- return t.isa<MemRefType, UnrankedMemRefType>();
- })) {
- funcOp->emitError("memref return type is unsupported");
- return failure();
- }
- }
-
// Post-pass cleanup of function argument attributes.
moduleOp.walk([&](func::FuncOp op) {
for (BlockArgument bbArg : op.getArguments())
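
With the "memref return type is unsupported" check removed above, functions whose results do not fold onto a bbArg simply keep their memref results (subject to allow-return-allocs). A rough sketch of what the previously rejected example from the deleted comment looks like after this change; the exact output (layout maps, deallocation) depends on the bufferization options used:

```mlir
// Tensor level: %0 is a new allocation, not equivalent to any bbArg.
func.func @return_alloc(%sz: index) -> tensor<?xf32> {
  %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
  return %0 : tensor<?xf32>
}

// Roughly, with allow-return-allocs, this now bufferizes to a function that
// returns the newly allocated buffer instead of failing.
func.func @return_alloc(%sz: index) -> memref<?xf32> {
  %0 = memref.alloc(%sz) : memref<?xf32>
  return %0 : memref<?xf32>
}
```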
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
index 044392a266752..3d682c98d3f1f 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-allow-return-allocs.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs" -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs drop-equivalent-func-results=false" -split-input-file | FileCheck %s --check-prefix=EQUIV
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs" -drop-equivalent-buffer-results -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs" -split-input-file | FileCheck %s --check-prefix=NO-DROP
// Run fuzzer with different seeds.
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
@@ -73,6 +73,6 @@ func.func @return_arg(%A: tensor<?xf32>) -> tensor<?xf32> {
// CHECK-SAME: %[[A:.*]]: memref<?xf32
// CHECK-NOT: return %[[A]]
-// EQUIV-LABEL: func @return_arg
-// EQUIV-SAME: %[[A:.*]]: memref<?xf32
-// EQUIV: return %[[A]]
+// NO-DROP-LABEL: func @return_arg
+// NO-DROP-SAME: %[[A:.*]]: memref<?xf32
+// NO-DROP: return %[[A]]
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
index 884f7c475eb77..d792565469388 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize-out-params.mlir
@@ -1,6 +1,6 @@
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=fully-dynamic-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=identity-layout-map" -drop-equivalent-buffer-results -buffer-results-to-out-params -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs function-boundary-type-conversion=infer-layout-map" -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s --check-prefix=CHECK-BASELINE
// Note: function-boundary-type-conversion=infer-layout-map with
// promote-buffer-results-to-out-params is an unsupported combination.
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
index beb3b38da7b0e..470caa175f6ec 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-module-bufferize.mlir
@@ -1,5 +1,5 @@
// Note: Default is function-boundary-type-conversion=infer-layout-map
-// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs" -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs" -drop-equivalent-buffer-results -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
// RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries=1 allow-return-allocs test-analysis-only analysis-fuzzer-seed=23" -split-input-file -o /dev/null
@@ -198,7 +198,7 @@ func.func @call_func_with_non_tensor_return(
// Note: The tensor return value cannot fold away because the CallOp
// bufferized out-of-place.
- // CHECK: return %[[call]], %[[alloc]] : f32, memref<?xf32>
+ // CHECK: return %[[call]], %[[casted]] : f32, memref<?xf32
return %1, %0 : f32, tensor<?xf32>
}
@@ -261,7 +261,7 @@ func.func @does_not_read(%t: tensor<?xf32>) -> tensor<?xf32> {
// CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
// CHECK-NOT: copy
// CHECK: call @does_not_read(%[[casted]])
-// CHECK: %[[r:.*]] = memref.load %[[alloc]]
+// CHECK: %[[r:.*]] = memref.load %[[casted]]
// CHECK: memref.dealloc %[[alloc]]
func.func @main(%t: tensor<?xf32> {bufferization.writable = false}) -> f32 {
%0 = call @does_not_read(%t) : (tensor<?xf32>) -> (tensor<?xf32>)
@@ -472,7 +472,7 @@ func.func @main() {
%res = call @init_and_dot(%AA, %BB, %CC) :
(tensor<64xf32>, tensor<64xf32>, tensor<f32>) -> tensor<f32>
- // CHECK-NEXT: %[[dC:.*]] = memref.cast %[[C]] : memref<f32> to memref<*xf32>
+ // CHECK-NEXT: %[[dC:.*]] = memref.cast %[[cC]] : memref<f32, {{.*}}> to memref<*xf32>
%res2 = tensor.cast %res: tensor<f32> to tensor<*xf32>
// CHECK-NEXT: call @printMemrefF32(%[[dC]]) : (memref<*xf32>) -> ()
@@ -562,9 +562,11 @@ func.func @equivalent_func_arg(%t0: tensor<?xf32> {bufferization.writable = true
%c0: index, %c10: index, %c1: index) -> tensor<?xf32> {
// CHECK-NOT: alloc
// CHECK-NOT: copy
+ // CHECK: scf.for {{.*}} iter_args(%[[t1:.*]] = %[[arg0]])
%1 = scf.for %iv = %c0 to %c10 step %c1 iter_args(%t1 = %t0) -> (tensor<?xf32>) {
- // CHECK: call @inner_func(%[[arg0]])
+ // CHECK: call @inner_func(%[[t1]])
%3 = func.call @inner_func(%t1) : (tensor<?xf32>) -> tensor<?xf32>
+ // CHECK: scf.yield %[[t1]]
scf.yield %3 : tensor<?xf32>
}
return %1: tensor<?xf32>
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
index 15a85f30fc0b1..dbf1ccca2979b 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -buffer-loop-hoisting -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -buffer-loop-hoisting -drop-equivalent-buffer-results -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
@@ -6,7 +6,7 @@
// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=91 bufferize-function-boundaries" -split-input-file -o /dev/null
// Test bufferization using memref types that have no layout map.
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs unknown-type-conversion=identity-layout-map function-boundary-type-conversion=identity-layout-map bufferize-function-boundaries" -drop-equivalent-buffer-results -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP
// TODO: Some test cases from this file should be moved to other dialects.
diff --git a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
index 7594dcbe5a702..2eacdd375610a 100644
--- a/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/SCF/one-shot-bufferize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -buffer-deallocation -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -drop-equivalent-buffer-results -buffer-deallocation -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null
diff --git a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
index 1e24376c9434d..0d05f0ff9264c 100644
--- a/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/one-shot-bufferize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs bufferize-function-boundaries" -drop-equivalent-buffer-results -split-input-file | FileCheck %s
// Run fuzzer with different seeds.
// RUN: mlir-opt %s -one-shot-bufferize="allow-return-allocs test-analysis-only analysis-fuzzer-seed=23 bufferize-function-boundaries" -split-input-file -o /dev/null