[Mlir-commits] [mlir] [MLIR] Make `OneShotModuleBufferize` use `OpInterface` (PR #107295)
llvmlistbot at llvm.org
Tue Sep 24 07:52:09 PDT 2024
github-actions[bot] wrote:
:warning: C/C++ code formatter, clang-format found issues in your code. :warning:
<details>
<summary>
You can test this locally with the following command:
</summary>
``````````bash
git-clang-format --diff 660e34fd38c3fb39fba1871bbf5b2eb3a48bf277 1811994699517d14b659405ff979b91f8e5e37a0 --extensions h,cpp -- mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h mlir/include/mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
``````````
</details>
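If the diff below looks right, one way to apply it locally (a sketch, not part of the bot message: it re-runs the same command as above and assumes the diff applies cleanly to your checkout) is to pipe the formatter's output straight into `git apply` instead of only viewing it:

``````````bash
# Sketch: re-run the git-clang-format invocation from above and apply the
# unified diff it prints to the working tree, then commit the result.
# Commit hashes and file list are the ones from this PR.
git-clang-format --diff 660e34fd38c3fb39fba1871bbf5b2eb3a48bf277 1811994699517d14b659405ff979b91f8e5e37a0 \
  --extensions h,cpp -- \
  mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h \
  mlir/include/mlir/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.h \
  mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp \
  mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp \
  mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp \
  | git apply
``````````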
<details>
<summary>
View the diff from clang-format here.
</summary>
``````````diff
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index ba28596d1f..eb0df1d92d 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -9,9 +9,9 @@
#ifndef MLIR_DIALECT_BUFFERIZATION_IR_BUFFERIZABLEOPINTERFACE_H_
#define MLIR_DIALECT_BUFFERIZATION_IR_BUFFERIZABLEOPINTERFACE_H_
-#include "mlir/Interfaces/FunctionInterfaces.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
+#include "mlir/Interfaces/FunctionInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/DenseMapInfoVariant.h"
#include "llvm/ADT/SetVector.h"
@@ -261,9 +261,9 @@ struct BufferizationOptions {
using AnalysisStateInitFn = std::function<void(AnalysisState &)>;
/// Tensor -> MemRef type converter.
/// Parameters: Value, memory space, func op, bufferization options
- using FunctionArgTypeConverterFn =
- std::function<BaseMemRefType(TensorType, Attribute memorySpace,
- FunctionOpInterface, const BufferizationOptions &)>;
+ using FunctionArgTypeConverterFn = std::function<BaseMemRefType(
+ TensorType, Attribute memorySpace, FunctionOpInterface,
+ const BufferizationOptions &)>;
/// Tensor -> MemRef type converter.
/// Parameters: Value, memory space, bufferization options
using UnknownTypeConverterFn = std::function<BaseMemRefType(
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
index ce90d907b4..f934ae8827 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotModuleBufferize.cpp
@@ -88,7 +88,7 @@ getOrCreateFuncAnalysisState(OneShotAnalysisState &state) {
/// Return the unique ReturnOp that terminates `funcOp`.
/// Return nullptr if there is no such unique ReturnOp.
-static Operation* getAssumedUniqueReturnOp(FunctionOpInterface funcOp) {
+static Operation *getAssumedUniqueReturnOp(FunctionOpInterface funcOp) {
Operation *returnOp = nullptr;
for (Block &b : funcOp.getFunctionBody()) {
auto candidateOp = b.getTerminator();
@@ -127,7 +127,8 @@ static void annotateEquivalentReturnBbArg(OpOperand &returnVal,
/// Store function BlockArguments that are equivalent to/aliasing a returned
/// value in FuncAnalysisState.
static LogicalResult
-aliasingFuncOpBBArgsAnalysis(FunctionOpInterface funcOp, OneShotAnalysisState &state,
+aliasingFuncOpBBArgsAnalysis(FunctionOpInterface funcOp,
+ OneShotAnalysisState &state,
FuncAnalysisState &funcState) {
if (funcOp.getFunctionBody().empty()) {
// No function body available. Conservatively assume that every tensor
@@ -168,8 +169,8 @@ aliasingFuncOpBBArgsAnalysis(FunctionOpInterface funcOp, OneShotAnalysisState &s
return success();
}
-static void annotateFuncArgAccess(FunctionOpInterface funcOp, int64_t idx, bool isRead,
- bool isWritten) {
+static void annotateFuncArgAccess(FunctionOpInterface funcOp, int64_t idx,
+ bool isRead, bool isWritten) {
OpBuilder b(funcOp.getContext());
Attribute accessType;
if (isRead && isWritten) {
@@ -189,10 +190,10 @@ static void annotateFuncArgAccess(FunctionOpInterface funcOp, int64_t idx, bool
/// function with unknown ops, we conservatively assume that such ops bufferize
/// to a read + write.
static LogicalResult
-funcOpBbArgReadWriteAnalysis(FunctionOpInterface funcOp, OneShotAnalysisState &state,
+funcOpBbArgReadWriteAnalysis(FunctionOpInterface funcOp,
+ OneShotAnalysisState &state,
FuncAnalysisState &funcState) {
- for (int64_t idx = 0, e = funcOp.getNumArguments(); idx < e;
- ++idx) {
+ for (int64_t idx = 0, e = funcOp.getNumArguments(); idx < e; ++idx) {
// Skip non-tensor arguments.
if (!isa<TensorType>(funcOp.getArgumentTypes()[idx]))
continue;
@@ -277,10 +278,8 @@ static void equivalenceAnalysis(FunctionOpInterface funcOp,
/// Return "true" if the given function signature has tensor semantics.
static bool hasTensorSignature(FunctionOpInterface funcOp) {
- return llvm::any_of(funcOp.getArgumentTypes(),
- llvm::IsaPred<TensorType>) ||
- llvm::any_of(funcOp.getResultTypes(),
- llvm::IsaPred<TensorType>);
+ return llvm::any_of(funcOp.getArgumentTypes(), llvm::IsaPred<TensorType>) ||
+ llvm::any_of(funcOp.getResultTypes(), llvm::IsaPred<TensorType>);
}
/// Store all functions of the `moduleOp` in `orderedFuncOps`, sorted by
@@ -310,7 +309,8 @@ getFuncOpsOrderedByCalls(ModuleOp moduleOp,
numberCallOpsContainedInFuncOp[funcOp] = 0;
return funcOp.walk([&](CallOpInterface callOp) -> WalkResult {
FunctionOpInterface calledFunction = getCalledFunction(callOp);
- assert(calledFunction && "could not retrieved called FunctionOpInterface");
+ assert(calledFunction &&
+ "could not retrieved called FunctionOpInterface");
// If the called function does not have any tensors in its signature, then
// it is not necessary to bufferize the callee before the caller.
if (!hasTensorSignature(calledFunction))
@@ -364,8 +364,8 @@ static void foldMemRefCasts(FunctionOpInterface funcOp) {
}
}
- auto newFuncType = FunctionType::get(
- funcOp.getContext(), funcOp.getArgumentTypes(), resultTypes);
+ auto newFuncType = FunctionType::get(funcOp.getContext(),
+ funcOp.getArgumentTypes(), resultTypes);
funcOp.setType(newFuncType);
}
``````````
</details>
https://github.com/llvm/llvm-project/pull/107295