[Mlir-commits] [mlir] [mlir][tensor] Make tensor::PadOp a ReifyRankedShapedTypeOpInterface and add a PadOp::FoldReifiedShape canonicalization (PR #145732)
Fabian Mora
llvmlistbot at llvm.org
Wed Jun 25 12:04:13 PDT 2025
================
@@ -3791,13 +3792,78 @@ struct FoldConsecutiveConstantPadding : public OpRewritePattern<tensor::PadOp> {
}
};
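+/// Refine the result type of a tensor.pad using reified result shapes: if
+/// reification proves some result sizes are constant, clone the pad with the
+/// refined type and cast back to the original type.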
+struct FoldReifiedShape : public OpRewritePattern<tensor::PadOp> {
+  using OpRewritePattern<tensor::PadOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(tensor::PadOp padOp,
+                                PatternRewriter &rewriter) const override {
+    if (padOp.getNofold())
+      return rewriter.notifyMatchFailure(padOp, "skipping unfoldable pad");
+
+    ReifiedRankedShapedTypeDims reifiedResultShapes;
+    if (failed(reifyResultShapes(rewriter, padOp, reifiedResultShapes)))
+      return failure();
+
+    // Compute the new result shape: take the reified size where it folds to a
+    // constant, otherwise keep the existing extent.
+    SmallVector<int64_t> newShape;
+    for (const auto &[s, ofr] : llvm::zip_equal(
+             padOp.getResultType().getShape(), reifiedResultShapes.front())) {
+      std::optional<int64_t> maybeCst = getConstantIntValue(ofr);
+      // Reification does not add static information; keep the existing size.
+      if (!maybeCst.has_value()) {
+        newShape.push_back(s);
+        continue;
+      }
+      int64_t cst = *maybeCst;
+      assert((ShapedType::isDynamic(s) || s == cst) && "constants must agree!");
+      newShape.push_back(cst);
+    }
+    if (newShape == padOp.getResultType().getShape())
+      return failure();
+
+    // Clone the pad with the refined result type and cast back to the
+    // original type so existing users remain valid.
+    Type oldType = padOp.getResultType();
+    Type newType =
+        RankedTensorType::Builder(padOp.getResultType()).setShape(newShape);
+    Operation *newPad = rewriter.clone(*padOp);
+    newPad->getResult(0).setType(newType);
+    rewriter.replaceOpWithNewOp<tensor::CastOp>(padOp, oldType,
+                                                newPad->getResult(0));
+    return success();
+  }
+};
+
} // namespace
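+/// ReifyRankedShapedTypeOpInterface implementation: each result size is the
+/// matching source size plus the low and high padding for that dimension.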
+LogicalResult
+PadOp::reifyResultShapes(OpBuilder &b,
+                         ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
+  reifiedReturnShapes.resize(1, SmallVector<OpFoldResult>(getType().getRank()));
+  SmallVector<OpFoldResult> lp = getMixedLowPad();
+  SmallVector<OpFoldResult> hp = getMixedHighPad();
+  for (int64_t i = 0; i < getResultType().getRank(); ++i) {
+    // Static dimensions are known; only dynamic ones need reification.
+    if (!getType().isDynamicDim(i)) {
+      reifiedReturnShapes[0][i] = b.getIndexAttr(getType().getDimSize(i));
+      continue;
+    }
+    Location loc = getLoc();
+    Value dim = b.createOrFold<tensor::DimOp>(
+        loc, getSource(), b.create<arith::ConstantIndexOp>(loc, i));
+
+    // Reified size of a dynamic dimension: dim(source, i) + low[i] + high[i].
+    AffineExpr d0, d1, d2;
+    bindDims(b.getContext(), d0, d1, d2);
+    reifiedReturnShapes[0][i] = affine::makeComposedFoldedAffineApply(
+        b, loc, {d0 + d1 + d2}, {dim, lp[i], hp[i]});
+  }
+  return success();
+}
+
void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results.add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast,
              FoldOrthogonalPaddings, FoldStaticPadding,
-              FoldConsecutiveConstantPadding>(context);
+              FoldConsecutiveConstantPadding, FoldReifiedShape>(context);
----------------
fabianmcg wrote:
Ok, `--linalg-fold-unit-extent-dims --canonicalize` without the folder is getting there. So I might be missing another transform.
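For context, a minimal sketch of the rewrite `FoldReifiedShape` performs (illustrative IR, not taken from the PR's tests; `%src` and `%pv` are made-up names):

```mlir
// Result type is dynamic in dim 0 even though its size reifies to a
// constant: 4 (source) + 1 (low) + 2 (high) = 7.
%c1 = arith.constant 1 : index
%0 = tensor.pad %src low[%c1, 0] high[2, 0] {
^bb0(%i: index, %j: index):
  tensor.yield %pv : f32
} : tensor<4x5xf32> to tensor<?x5xf32>

// After the pattern: the pad is cloned with the reified static type and a
// tensor.cast recovers the original type for existing users.
%1 = tensor.pad %src low[%c1, 0] high[2, 0] {
^bb0(%i: index, %j: index):
  tensor.yield %pv : f32
} : tensor<4x5xf32> to tensor<7x5xf32>
%2 = tensor.cast %1 : tensor<7x5xf32> to tensor<?x5xf32>
```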
https://github.com/llvm/llvm-project/pull/145732