[Mlir-commits] [mlir] 9f815cb - [mlir][linalg] Cleanup LinalgOp usage in test passes.
Tobias Gysi
llvmlistbot at llvm.org
Thu Jun 3 05:08:12 PDT 2021
Author: Tobias Gysi
Date: 2021-06-03T12:07:29Z
New Revision: 9f815cb57833a00a4db6e24598d0a80433e404be
URL: https://github.com/llvm/llvm-project/commit/9f815cb57833a00a4db6e24598d0a80433e404be
DIFF: https://github.com/llvm/llvm-project/commit/9f815cb57833a00a4db6e24598d0a80433e404be.diff
LOG: [mlir][linalg] Cleanup LinalgOp usage in test passes.
Replace the uses of deprecated Structured Op Interface methods in TestLinalgElementwiseFusion.cpp, TestLinalgFusionTransforms.cpp, and Transforms.cpp. The patch is based on https://reviews.llvm.org/D103394.
Differential Revision: https://reviews.llvm.org/D103528
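For readers unfamiliar with the new interface, the mechanical pattern behind every hunk below is the move from the deprecated OpOperand& ranges (getShapedOpOperands, getShapedOperands, getNumShapedOperands) to the OpOperand*-based accessors introduced in D103394. The following is a minimal illustrative sketch, not part of the patch; the helper name is made up, and the include path assumes the tree at this revision:

#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "llvm/ADT/SmallVector.h"

using namespace mlir;

// Hypothetical helper (for illustration only): collect the Values of all
// input and output operands of a LinalgOp through the new accessors used
// throughout this patch.
static SmallVector<Value>
collectInputAndOutputValues(linalg::LinalgOp linalgOp) {
  SmallVector<Value> values;
  values.reserve(linalgOp.getNumInputsAndOutputs());
  // getInputAndOutputOperands() hands out OpOperand pointers, so callers
  // dereference them (or call ->get()) instead of binding references.
  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands())
    values.push_back(opOperand->get());
  return values;
}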
Added:
Modified:
mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index c85cd0cdd598..15420cc302da 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -167,9 +167,9 @@ static LogicalResult rewriteAsPaddedOp(PatternRewriter &rewriter,

   // If the op is fully static, it does not need padding.
   // TODO: there are cases where we may still want to pad to larger sizes.
-  if (llvm::all_of(opToPad.getShapedOperands(), [](Value v) {
-        return v.getType().cast<RankedTensorType>().hasStaticShape();
-      }))
+  assert(opToPad.hasTensorSemantics() &&
+         "expected operation to have tensor semantics");
+  if (!opToPad.hasDynamicShape())
     return success();

   OpBuilder::InsertionGuard g(rewriter);
@@ -177,16 +177,16 @@ static LogicalResult rewriteAsPaddedOp(PatternRewriter &rewriter,
   rewriter.setInsertionPointAfter(opToPad);

   // Make a copy of the shaped operands and update it.
   SmallVector<Value> newOperands;
-  newOperands.reserve(opToPad.getNumShapedOperands());
-  for (OpOperand &operand : opToPad.getShapedOpOperands()) {
+  newOperands.reserve(opToPad.getNumInputsAndOutputs());
+  for (OpOperand *opOperand : opToPad.getInputAndOutputOperands()) {
     Value paddedOperand;
     // If padding was requested but the shape cannot be bounded statically then
     // the pattern fails to apply.
-    if (failed(padOperandToSmallestStaticBoundingBox(rewriter, opToPad, operand,
-                                                     options, paddedOperand))) {
+    if (failed(padOperandToSmallestStaticBoundingBox(
+            rewriter, opToPad, *opOperand, options, paddedOperand))) {
       return failure();
     }
-    newOperands.push_back(paddedOperand ? paddedOperand : operand.get());
+    newOperands.push_back(paddedOperand ? paddedOperand : opOperand->get());
   }

   // Clone `opToPad` to operate on the statically padded shapes.
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
index d0812ab8ec0d..d7ec7561a3f6 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
@@ -24,8 +24,8 @@ static void addOperands(Operation *op, SetVector<Value> &operandSet) {
     return;
   TypeSwitch<Operation *, void>(op)
       .Case<linalg::LinalgOp>([&](linalg::LinalgOp linalgOp) {
-        operandSet.insert(linalgOp.getInputs().begin(),
-                          linalgOp.getInputs().end());
+        SmallVector<Value> inputOperands = linalgOp.getInputOperands();
+        operandSet.insert(inputOperands.begin(), inputOperands.end());
       })
       .Default([&](Operation *operation) {
         operandSet.insert(operation->operand_begin(), operation->operand_end());
diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
index 4413faca5dc6..26e4f6a95b02 100644
--- a/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
+++ b/mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
@@ -147,14 +147,14 @@ static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {
   // Tile and Fuse for tensors inputs (TODO: all tensor operands).
   bool changed = false;
   for (LinalgOp linalgOp : llvm::reverse(linalgOps)) {
-    for (OpOperand &opOperand : linalgOp.getShapedOpOperands()) {
-      if (opOperand.get().getType().isa<MemRefType>()) {
+    for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
+      if (opOperand->get().getType().isa<MemRefType>()) {
         // TODO: LinalgDependenceGraph should be able to update itself.
         // The current naive and expensive reconstruction of the graph should
         // be removed.
         linalg::Aliases aliases;
         linalg::LinalgDependenceGraph graph(aliases, linalgOps);
-        if (auto info = fuseProducerOfBuffer(b, opOperand, graph)) {
+        if (auto info = fuseProducerOfBuffer(b, *opOperand, graph)) {
           auto *originalOp = info->originalProducer.getOperation();
           eraseSet.insert(originalOp);
           auto *originalOpInLinalgOpsVector =
@@ -163,11 +163,11 @@ static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {
           changed = true;
         }
       } else {
-        assert(opOperand.get().getType().isa<RankedTensorType>());
+        assert(opOperand->get().getType().isa<RankedTensorType>());
         // Tile and Fuse tensor input.
-        if (opOperand.getOperandNumber() >= linalgOp.getNumInputs())
+        if (opOperand->getOperandNumber() >= linalgOp.getNumInputs())
           continue;
-        if (auto info = fuseProducerOfTensor(b, opOperand)) {
+        if (auto info = fuseProducerOfTensor(b, *opOperand)) {
           auto *originalOp = info->originalProducer.getOperation();
           auto *originalOpInLinalgOpsVector =
               std::find(linalgOps.begin(), linalgOps.end(), originalOp);