[Mlir-commits] [mlir] 1ea5b48 - [mlir][Linalg] NFC - Improve debug messages for padding
Nicolas Vasilache
llvmlistbot at llvm.org
Wed Mar 1 23:58:31 PST 2023
Author: Nicolas Vasilache
Date: 2023-03-01T23:58:26-08:00
New Revision: 1ea5b4853e8431abe756290d57ea286b9b280b50
URL: https://github.com/llvm/llvm-project/commit/1ea5b4853e8431abe756290d57ea286b9b280b50
DIFF: https://github.com/llvm/llvm-project/commit/1ea5b4853e8431abe756290d57ea286b9b280b50.diff
LOG: [mlir][Linalg] NFC - Improve debug messages for padding
Added:
Modified:
mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
Removed:
################################################################################
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
index 47db0b712202e..2c1c56da0b114 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
@@ -175,11 +175,13 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
                           ArrayRef<int64_t> paddingDimensions,
                           ArrayRef<Attribute> paddingValues,
                           ArrayRef<bool> packPaddings, LinalgOp &paddedOp) {
+  LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << opToPad << "\n");
   Location loc = opToPad->getLoc();
 
   // TODO: there are cases where we may still want to pad to larger sizes.
-  assert(opToPad.hasTensorSemantics() &&
-         "expected operation to have tensor semantics");
+  if (!opToPad.hasTensorSemantics())
+    return rewriter.notifyMatchFailure(opToPad,
+                                       "expected operation on tensors");
 
   OpBuilder::InsertionGuard g(rewriter);
   // Set IP after op because we also take the dims of the original output.
@@ -193,15 +195,22 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
         rewriter, opToPad, &opOperand, paddingDimensions, paddingValues,
         packPaddings);
     // Exit if `paddingDimensions` cannot be bounded statically.
-    if (failed(paddedOperand))
-      return failure();
+    if (failed(paddedOperand)) {
+      LLVM_DEBUG(DBGS() << "--operand cannot be bound statically : "
+                        << opOperand.get() << " -> FAIL\n");
+      return rewriter.notifyMatchFailure(opToPad,
+                                         "operand cannot be bound statically");
+    }
     newOperands.push_back(*paddedOperand);
   }
 
   SmallVector<SmallVector<Value>> reifiedResultShapes;
   if (failed(cast<ReifyRankedShapedTypeOpInterface>(opToPad.getOperation())
-                 .reifyResultShapes(rewriter, reifiedResultShapes)))
-    return failure();
+                 .reifyResultShapes(rewriter, reifiedResultShapes))) {
+    LLVM_DEBUG(DBGS() << "--failed to reify result shapes -> FAIL\n");
+    return rewriter.notifyMatchFailure(opToPad,
+                                       "failed to reify result shapes");
+  }
   assert(reifiedResultShapes.size() == opToPad->getNumResults() &&
          "expected same number of results");
@@ -210,6 +219,7 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
       ValueRange(newOperands).take_back(opToPad.getNumDpsInits()).getTypes();
   // clone **should** properly notify the rewriter.
   paddedOp = clone(rewriter, opToPad, resultTensorTypes, newOperands);
+  LLVM_DEBUG(DBGS() << "--cloned padded op: " << paddedOp << "\n");
 
   // Recover the slice out of the new static results. This keeps the original
   // linalg op around because it uses the dims of the original results.
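
For context when reading the diff above: the change follows the usual LLVM/MLIR pattern of tracing intermediate state with LLVM_DEBUG(DBGS() << ...) and reporting recoverable precondition failures through RewriterBase::notifyMatchFailure instead of assert, so callers (for example the transform dialect interpreter) can surface the message rather than abort. Below is a minimal, self-contained sketch of that pattern, not code from this commit; the DEBUG_TYPE value and the helper/parameter names (rewriteWithTracing, myPrecondition) are illustrative assumptions.

// Sketch of the debug/failure-reporting pattern applied in the diff above.
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LogicalResult.h"
#include "llvm/Support/Debug.h"

// Assumed debug category; Transforms.cpp defines its own DEBUG_TYPE/DBGS().
#define DEBUG_TYPE "linalg-transforms"
#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE "]: ")

using namespace mlir;

static LogicalResult rewriteWithTracing(RewriterBase &rewriter, Operation *op,
                                        bool myPrecondition) {
  // Trace entry so a failing run shows which op was being processed.
  LLVM_DEBUG(DBGS() << "Start rewrite : " << *op << "\n");
  if (!myPrecondition) {
    // Recoverable failure: record a message on the rewriter instead of
    // asserting, so the enclosing driver can report it to the user.
    LLVM_DEBUG(DBGS() << "--precondition not met -> FAIL\n");
    return rewriter.notifyMatchFailure(op, "precondition not met");
  }
  // ... perform the actual rewrite here ...
  return success();
}

With an assertions-enabled build, the LLVM_DEBUG traces print when running, e.g., mlir-opt with -debug-only=<DEBUG_TYPE> (or -debug for all debug output), and the notifyMatchFailure string is what appears as the match-failure diagnostic.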