[Mlir-commits] [mlir] f3483c2 - [mlir][bufferization] Fix getAliasingOpOperand/OpResult for non-bufferizable ops
Matthias Springer
llvmlistbot at llvm.org
Mon Jan 30 01:10:54 PST 2023
Author: Matthias Springer
Date: 2023-01-30T10:10:43+01:00
New Revision: f3483c23ce2c833f9bcb6edb2ac99e7af76218ad
URL: https://github.com/llvm/llvm-project/commit/f3483c23ce2c833f9bcb6edb2ac99e7af76218ad
DIFF: https://github.com/llvm/llvm-project/commit/f3483c23ce2c833f9bcb6edb2ac99e7af76218ad.diff
LOG: [mlir][bufferization] Fix getAliasingOpOperand/OpResult for non-bufferizable ops
Also enable analysis of unknown ops.
Differential Revision: https://reviews.llvm.org/D142006
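
In short: `AnalysisState::getAliasingOpOperand`/`getAliasingOpResult` previously returned an empty vector when the defining op did not implement `BufferizableOpInterface`, which the analysis could misread as "no aliasing". With this change, the conservative default is that every tensor result of a non-bufferizable op may alias every tensor operand (and vice versa), and such ops are now included in the in-place analysis. The snippet below is a minimal illustration of the effect, modeled on the new test case further down (the unregistered "dummy.dummy_op" is just a stand-in for any op without the interface):

  %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<10xf32>) -> tensor<10xf32>
  // %alias may now be considered an alias of %1 ...
  %alias = "dummy.dummy_op"(%1) : (tensor<10xf32>) -> (tensor<10xf32>)
  // ... so overwriting %1 in place would clobber %alias, which is still read
  // below. The analysis therefore bufferizes the operand of "dummy.dummy_op"
  // out of place (a copy of %1's buffer is made before the unknown op).
  %2 = linalg.fill ins(%f2 : f32) outs(%1 : tensor<10xf32>) -> tensor<10xf32>
  %3 = tensor.extract %alias[%pos] : tensor<10xf32>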
Added:
mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir
Modified:
mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir
mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
Removed:
################################################################################
diff --git a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
index 061f2809b6d12..fda05534bca36 100644
--- a/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
+++ b/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h
@@ -315,12 +315,14 @@ bool isFunctionArgument(Value value);
/// tensor values.
class AnalysisState {
public:
- /// Determine which OpOperand* will alias with `result` if the op is
- /// bufferized in place. Return an empty vector if the op is not bufferizable.
- SmallVector<OpOperand *> getAliasingOpOperand(OpResult result) const;
+ /// Determine which OpOperand* will alias with `opResult` if the op is
+ /// bufferized in place. Return all tensor OpOperand* if the op is not
+ /// bufferizable.
+ SmallVector<OpOperand *> getAliasingOpOperand(OpResult opResult) const;
/// Determine which OpResult will alias with `opOperand` if the op is
- /// bufferized in place. Return an empty vector if the op is not bufferizable.
+ /// bufferized in place. Return all tensor OpResults if the op is not
+ /// bufferizable.
SmallVector<OpResult> getAliasingOpResult(OpOperand &opOperand) const;
/// Return true if `opOperand` bufferizes to a memory read. Return `true` if
@@ -582,6 +584,14 @@ bool defaultResultBufferizesToMemoryWrite(OpResult opResult,
/// places.
bool defaultIsRepetitiveRegion(BufferizableOpInterface bufferizableOp,
unsigned index);
+
+/// This is the default implementation of getAliasingOpOperand in case the
+/// defining op does not implement the BufferizableOpInterface.
+SmallVector<OpOperand *> unknownGetAliasingOpOperand(OpResult opResult);
+
+/// This is the default implementation of getAliasingOpResult in case the
+/// defining op does not implement the BufferizableOpInterface.
+SmallVector<OpResult> unknownGetAliasingOpResult(OpOperand &opOperand);
} // namespace detail
} // namespace bufferization
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index c24653d18abaa..dc430177133aa 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -349,24 +349,29 @@ static void setInsertionPointAfter(OpBuilder &b, Value value) {
}
}
-/// Determine which OpOperand* will alias with `result` if the op is bufferized
-/// in place. Return an empty vector if the op is not bufferizable.
+/// Determine which OpOperand* will alias with `opResult` if the op is
+/// bufferized in place. Return all tensor OpOperand* if the op is not
+/// bufferizable.
SmallVector<OpOperand *>
-AnalysisState::getAliasingOpOperand(OpResult result) const {
- if (Operation *op = result.getDefiningOp())
+AnalysisState::getAliasingOpOperand(OpResult opResult) const {
+ if (Operation *op = opResult.getDefiningOp())
if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
- return bufferizableOp.getAliasingOpOperand(result, *this);
- return {};
+ return bufferizableOp.getAliasingOpOperand(opResult, *this);
+
+ // The op is not bufferizable.
+ return detail::unknownGetAliasingOpOperand(opResult);
}
/// Determine which OpResult will alias with `opOperand` if the op is bufferized
-/// in place. Return an empty vector if the op is not bufferizable.
+/// in place. Return all tensor OpResults if the op is not bufferizable.
SmallVector<OpResult>
AnalysisState::getAliasingOpResult(OpOperand &opOperand) const {
if (auto bufferizableOp =
getOptions().dynCastBufferizableOp(opOperand.getOwner()))
return bufferizableOp.getAliasingOpResult(opOperand, *this);
- return {};
+
+ // The op is not bufferizable.
+ return detail::unknownGetAliasingOpResult(opOperand);
}
/// Return true if `opOperand` bufferizes to a memory read. Return `true` if the
@@ -623,103 +628,6 @@ FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
.getResult();
}
-bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
- OpResult opResult, const AnalysisState &state) {
- auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
- SmallVector<OpOperand *> opOperands =
- bufferizableOp.getAliasingOpOperand(opResult, state);
-
- // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
- // memory writes.
- if (opOperands.empty())
- return true;
-
- // Case 2: If an aliasing OpOperand bufferizes to a memory write, the OpResult
- // may bufferize to a memory write.
- if (llvm::any_of(opOperands, [&](OpOperand *operand) {
- return state.bufferizesToMemoryWrite(*operand);
- }))
- return true;
-
- // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
- // write. (Or: The reverse SSA use-def chain ends inside the reigon.) In that
- // case, the OpResult bufferizes to a memory write. E.g.:
- //
- // %0 = "some_writing_op" : tensor<?xf32>
- // %r = scf.if ... -> tensor<?xf32> {
- // scf.yield %0 : tensor<?xf32>
- // } else {
- // %1 = "another_writing_op"(%0) : tensor<?xf32>
- // scf.yield %1 : tensor<?xf32>
- // }
- // "some_reading_op"(%r)
- //
- // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
- // bufferizes to a memory write and the defining op is inside the scf.if.
- //
- // Note: This treatment of surrouding ops is useful for ops that have a
- // region but no OpOperand such as scf.if or scf.execute_region. It simplifies
- // the analysis considerably.
- //
- // "another_writing_op" in the above example should be able to bufferize
- // inplace in the absence of another read of %0. However, if the scf.if op
- // would not be considered a "write", the analysis would detect the
- // following conflict:
- //
- // * read = some_reading_op
- // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
- // * conflictingWrite = %1
- //
- auto isMemoryWriteInsideOp = [&](Value v) {
- Operation *op = getOwnerOfValue(v);
- if (!opResult.getDefiningOp()->isAncestor(op))
- return false;
- return state.bufferizesToMemoryWrite(v);
- };
- for (OpOperand *operand : opOperands) {
- if (!state
- .findValueInReverseUseDefChain(operand->get(),
- isMemoryWriteInsideOp,
- /*followEquivalentOnly=*/false)
- .empty())
- return true;
- }
- return false;
-}
-
-FailureOr<BaseMemRefType> bufferization::detail::defaultGetBufferType(
- Value value, const BufferizationOptions &options,
- const DenseMap<Value, BaseMemRefType> &fixedTypes) {
- assert(value.getType().isa<TensorType>() && "expected tensor type");
-
- // No further analysis is possible for a block argument.
- if (value.isa<BlockArgument>())
- return bufferization::getMemRefType(value, options);
-
- // Value is an OpResult.
- Operation *op = getOwnerOfValue(value);
- auto opResult = value.cast<OpResult>();
- auto bufferizableOp = cast<BufferizableOpInterface>(op);
- AnalysisState state(options);
- auto aliasingOperands = bufferizableOp.getAliasingOpOperand(opResult, state);
- if (!aliasingOperands.empty() &&
- bufferizableOp.bufferRelation(opResult, state) ==
- BufferRelation::Equivalent) {
- // If the OpResult has an equivalent OpOperand, both OpResult and
- // OpOperand bufferize to the exact same buffer type.
- Value equivalentOperand = aliasingOperands.front()->get();
- return getBufferType(equivalentOperand, options, fixedTypes);
- }
-
- // If we do not know the memory space and there is no default memory space,
- // report a failure.
- if (!options.defaultMemorySpace.has_value())
- return op->emitError("could not infer memory space");
-
- return getMemRefType(value, options, /*layout=*/{},
- *options.defaultMemorySpace);
-}
-
/// Return the buffer type for a given Value (tensor) after bufferization.
FailureOr<BaseMemRefType>
bufferization::getBufferType(Value value, const BufferizationOptions &options) {
@@ -899,6 +807,107 @@ bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
memorySpace);
}
+//===----------------------------------------------------------------------===//
+// Default implementations of interface methods
+//===----------------------------------------------------------------------===//
+
+bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
+ OpResult opResult, const AnalysisState &state) {
+ auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
+ SmallVector<OpOperand *> opOperands =
+ bufferizableOp.getAliasingOpOperand(opResult, state);
+
+ // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
+ // memory writes.
+ if (opOperands.empty())
+ return true;
+
+ // Case 2: If an aliasing OpOperand bufferizes to a memory write, the OpResult
+ // may bufferize to a memory write.
+ if (llvm::any_of(opOperands, [&](OpOperand *operand) {
+ return state.bufferizesToMemoryWrite(*operand);
+ }))
+ return true;
+
+ // Case 3: Check if a nested aliasing OpOperand value bufferizes to a memory
+ // write. (Or: The reverse SSA use-def chain ends inside the region.) In that
+ // case, the OpResult bufferizes to a memory write. E.g.:
+ //
+ // %0 = "some_writing_op" : tensor<?xf32>
+ // %r = scf.if ... -> tensor<?xf32> {
+ // scf.yield %0 : tensor<?xf32>
+ // } else {
+ // %1 = "another_writing_op"(%0) : tensor<?xf32>
+ // scf.yield %1 : tensor<?xf32>
+ // }
+ // "some_reading_op"(%r)
+ //
+ // %r bufferizes to a memory write because an aliasing OpOperand value (%1)
+ // bufferizes to a memory write and the defining op is inside the scf.if.
+ //
+ // Note: This treatment of surrounding ops is useful for ops that have a
+ // region but no OpOperand such as scf.if or scf.execute_region. It simplifies
+ // the analysis considerably.
+ //
+ // "another_writing_op" in the above example should be able to bufferize
+ // inplace in the absence of another read of %0. However, if the scf.if op
+ // would not be considered a "write", the analysis would detect the
+ // following conflict:
+ //
+ // * read = some_reading_op
+ // * lastWrite = %0 (Note: The last write of %r would be a set: {%0, %1}.)
+ // * conflictingWrite = %1
+ //
+ auto isMemoryWriteInsideOp = [&](Value v) {
+ Operation *op = getOwnerOfValue(v);
+ if (!opResult.getDefiningOp()->isAncestor(op))
+ return false;
+ return state.bufferizesToMemoryWrite(v);
+ };
+ for (OpOperand *operand : opOperands) {
+ if (!state
+ .findValueInReverseUseDefChain(operand->get(),
+ isMemoryWriteInsideOp,
+ /*followEquivalentOnly=*/false)
+ .empty())
+ return true;
+ }
+ return false;
+}
+
+FailureOr<BaseMemRefType> bufferization::detail::defaultGetBufferType(
+ Value value, const BufferizationOptions &options,
+ const DenseMap<Value, BaseMemRefType> &fixedTypes) {
+ assert(value.getType().isa<TensorType>() && "expected tensor type");
+
+ // No further analysis is possible for a block argument.
+ if (value.isa<BlockArgument>())
+ return bufferization::getMemRefType(value, options);
+
+ // Value is an OpResult.
+ Operation *op = getOwnerOfValue(value);
+ auto opResult = value.cast<OpResult>();
+ auto bufferizableOp = cast<BufferizableOpInterface>(op);
+ AnalysisState state(options);
+ auto aliasingOperands = bufferizableOp.getAliasingOpOperand(opResult, state);
+ if (!aliasingOperands.empty() &&
+ bufferizableOp.bufferRelation(opResult, state) ==
+ BufferRelation::Equivalent) {
+ // If the OpResult has an equivalent OpOperand, both OpResult and
+ // OpOperand bufferize to the exact same buffer type.
+ Value equivalentOperand = aliasingOperands.front()->get();
+ return getBufferType(equivalentOperand, options, fixedTypes);
+ }
+
+ // If we do not know the memory space and there is no default memory space,
+ // report a failure.
+ if (!options.defaultMemorySpace.has_value())
+ return op->emitError("could not infer memory space");
+
+ return getMemRefType(value, options, /*layout=*/{},
+ *options.defaultMemorySpace);
+}
+
bool bufferization::detail::defaultIsRepetitiveRegion(
BufferizableOpInterface bufferizableOp, unsigned index) {
assert(index < bufferizableOp->getNumRegions() && "invalid region index");
@@ -908,3 +917,23 @@ bool bufferization::detail::defaultIsRepetitiveRegion(
return false;
return regionInterface.isRepetitiveRegion(index);
}
+
+SmallVector<OpOperand *>
+bufferization::detail::unknownGetAliasingOpOperand(OpResult opResult) {
+ // Conservatively assume that everything is aliasing.
+ SmallVector<OpOperand *> r;
+ for (OpOperand &operand : opResult.getDefiningOp()->getOpOperands())
+ if (operand.get().getType().isa<TensorType>())
+ r.push_back(&operand);
+ return r;
+}
+
+SmallVector<OpResult>
+bufferization::detail::unknownGetAliasingOpResult(OpOperand &opOperand) {
+ // Conservatively assume that everything is aliasing.
+ SmallVector<OpResult> r;
+ for (OpResult result : opOperand.getOwner()->getOpResults())
+ if (result.getType().isa<TensorType>())
+ r.push_back(result);
+ return r;
+}
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
index 34cdf147ca439..2a6c6af89654f 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/FuncBufferizableOpInterfaceImpl.cpp
@@ -181,15 +181,9 @@ struct CallOpInterface
func::CallOp callOp = cast<func::CallOp>(op);
FuncOp funcOp = getCalledFunction(callOp);
assert(funcOp && "expected CallOp to a FuncOp");
- if (getFuncOpAnalysisState(state, funcOp) !=
- FuncOpAnalysisState::Analyzed) {
+ if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
// FuncOp not analyzed yet. Any OpResult may be aliasing.
- SmallVector<OpResult> result;
- for (OpResult opResult : op->getOpResults())
- if (opResult.getType().isa<TensorType>())
- result.push_back(opResult);
- return result;
- }
+ return detail::unknownGetAliasingOpResult(opOperand);
// Get aliasing results from state.
const FuncAnalysisState &funcState = getFuncAnalysisState(state);
@@ -208,15 +202,9 @@ struct CallOpInterface
func::CallOp callOp = cast<func::CallOp>(op);
FuncOp funcOp = getCalledFunction(callOp);
assert(funcOp && "expected CallOp to a FuncOp");
- if (getFuncOpAnalysisState(state, funcOp) !=
- FuncOpAnalysisState::Analyzed) {
+ if (getFuncOpAnalysisState(state, funcOp) != FuncOpAnalysisState::Analyzed)
// FuncOp not analyzed yet. Any OpOperand may be aliasing.
- SmallVector<OpOperand *> result;
- for (OpOperand &opOperand : op->getOpOperands())
- if (opOperand.get().getType().isa<TensorType>())
- result.push_back(&opOperand);
- return result;
- }
+ return detail::unknownGetAliasingOpOperand(opResult);
// Get aliasing bbArgs from state.
const FuncAnalysisState &funcState = getFuncAnalysisState(state);
diff --git a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
index 8570352b52b4b..58ff9809640ab 100644
--- a/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
+++ b/mlir/lib/Dialect/Bufferization/Transforms/OneShotAnalysis.cpp
@@ -895,10 +895,9 @@ static LogicalResult inPlaceAnalysis(SmallVector<Operation *> &ops,
auto analyzeOp = [&](Operation *op) {
for (OpOperand &opOperand : op->getOpOperands())
if (opOperand.get().getType().isa<TensorType>())
- if (auto bufferizableOp = state.getOptions().dynCastBufferizableOp(op))
- if (failed(bufferizableInPlaceAnalysisImpl(opOperand, aliasInfo,
- state, domInfo)))
- return failure();
+ if (failed(bufferizableInPlaceAnalysisImpl(opOperand, aliasInfo, state,
+ domInfo)))
+ return failure();
return success();
};
@@ -1024,11 +1023,10 @@ annotateOpsWithBufferizationMarkers(Operation *op,
const BufferizationAliasInfo &aliasInfo,
const BufferizationOptions &options) {
// Add __inplace_operands_attr__.
- op->walk([&](BufferizableOpInterface bufferizableOp) {
- if (options.isOpAllowed(bufferizableOp.getOperation()))
- for (OpOperand &opOperand : bufferizableOp->getOpOperands())
- if (opOperand.get().getType().isa<TensorType>())
- setInPlaceOpOperand(opOperand, aliasInfo.isInPlace(opOperand));
+ op->walk([&](Operation *op) {
+ for (OpOperand &opOperand : op->getOpOperands())
+ if (opOperand.get().getType().isa<TensorType>())
+ setInPlaceOpOperand(opOperand, aliasInfo.isInPlace(opOperand));
});
}
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir
new file mode 100644
index 0000000000000..6da126837a1fa
--- /dev/null
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-analysis.mlir
@@ -0,0 +1,34 @@
+// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only" -allow-unregistered-dialect -split-input-file | FileCheck %s
+
+// CHECK-LABEL: func @unknown_op_aliasing(
+func.func @unknown_op_aliasing(%f: f32, %f2: f32, %pos: index) -> f32 {
+ %0 = tensor.empty() : tensor<10xf32>
+ // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "true"]}
+ %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<10xf32>) -> tensor<10xf32>
+
+ // Something must bufferize out-of-place because the op may return an alias
+ // of %1.
+ // CHECK: "dummy.dummy_op"(%{{.*}}) {__inplace_operands_attr__ = ["false"]}
+ %alias = "dummy.dummy_op"(%1) : (tensor<10xf32>) -> (tensor<10xf32>)
+
+ // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "true"]}
+ %2 = linalg.fill ins(%f2 : f32) outs(%1 : tensor<10xf32>) -> tensor<10xf32>
+ %3 = tensor.extract %alias[%pos] : tensor<10xf32>
+ return %3 : f32
+}
+
+// -----
+
+// CHECK-LABEL: func @unknown_op_writing(
+func.func @unknown_op_writing(%f: f32, %f2: f32, %pos: index) -> f32 {
+ %0 = tensor.empty() : tensor<10xf32>
+ // CHECK: linalg.fill {__inplace_operands_attr__ = ["none", "true"]}
+ %1 = linalg.fill ins(%f : f32) outs(%0 : tensor<10xf32>) -> tensor<10xf32>
+
+ // The op may bufferize to a memory write, so it must bufferize out-of-place.
+ // CHECK: "dummy.dummy_op"(%{{.*}}) {__inplace_operands_attr__ = ["false"]}
+ "dummy.dummy_op"(%1) : (tensor<10xf32>) -> ()
+
+ %3 = tensor.extract %1[%pos] : tensor<10xf32>
+ return %3 : f32
+}
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
index 35eba921ccb16..f01e4ecfc914c 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-partial.mlir
@@ -100,11 +100,14 @@ func.func @use_of_bufferizable_op_in_unbufferizable_op(
%t1: tensor<?xf32>, %o: index, %s: index) -> (tensor<?xf32>, tensor<?xf32>) {
// CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]]
// CHECK: %[[subview:.*]] = memref.subview %[[m1]]
+ // The op must alloc because "test.dummy" may bufferize to a memory write.
+ // CHECK: %[[alloc:.*]] = memref.alloc
+ // CHECK: memref.copy %[[subview]], %[[alloc]]
%0 = tensor.extract_slice %t1[%o][%s][1] : tensor<?xf32> to tensor<?xf32>
- // CHECK: %[[subview_tensor:.*]] = bufferization.to_tensor %[[subview]]
- // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[subview_tensor]])
+ // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]]
+ // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[alloc_tensor]])
%1 = "test.dummy_op"(%0) : (tensor<?xf32>) -> tensor<?xf32>
- // CHECK: return %[[subview_tensor]], %[[dummy]]
+ // CHECK: return %[[alloc_tensor]], %[[dummy]]
return %0, %1 : tensor<?xf32>, tensor<?xf32>
}
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir
index e7e8252a95b55..0f8d886dadde0 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize-pass-statistics.mlir
@@ -5,7 +5,7 @@
// CHECK: (S) 1 num-buffer-alloc
// CHECK: (S) 1 num-buffer-dealloc
// CHECK: (S) 1 num-tensor-in-place
-// CHECK: (S) 1 num-tensor-out-of-place
+// CHECK: (S) 2 num-tensor-out-of-place
func.func @read_after_write_conflict(%cst : f32, %idx : index, %idx2 : index)
-> (f32, f32) {
%t = "test.dummy_op"() : () -> (tensor<10xf32>)
diff --git a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
index 4c5c5919a79d0..e980b2b777654 100644
--- a/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir
@@ -136,7 +136,7 @@ func.func @copy_deallocated() -> tensor<10xf32> {
// CHECK-LABEL: func @select_different_tensors(
// CHECK-SAME: %[[t:.*]]: tensor<?xf32>
-func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %c: i1) -> tensor<?xf32> {
+func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %pos: index, %c: i1) -> f32 {
// CHECK-DAG: %[[m:.*]] = bufferization.to_memref %[[t]] : memref<?xf32, strided{{.*}}>
// CHECK-DAG: %[[alloc:.*]] = memref.alloc(%{{.*}}) {{.*}} : memref<?xf32>
%0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
@@ -145,7 +145,8 @@ func.func @select_different_tensors(%t: tensor<?xf32>, %sz: index, %c: i1) -> te
// CHECK: %[[casted:.*]] = memref.cast %[[alloc]] : memref<?xf32> to memref<?xf32, strided{{.*}}>
// CHECK: arith.select %{{.*}}, %[[casted]], %[[m]]
%1 = arith.select %c, %0, %t : tensor<?xf32>
- return %1 : tensor<?xf32>
+ %2 = tensor.extract %1[%pos] : tensor<?xf32>
+ return %2 : f32
}
// -----