[Mlir-commits] [mlir] [mlir][tensor] Make tensor::PadOp a ReifyRankedShapedTypeOpInterface and add a PadOp::FoldReifiedShape canonicalization (PR #145732)
Fabian Mora
llvmlistbot at llvm.org
Wed Jun 25 16:57:50 PDT 2025
================
@@ -3791,13 +3792,78 @@ struct FoldConsecutiveConstantPadding : public OpRewritePattern<tensor::PadOp> {
   }
 };
 
+struct FoldReifiedShape : public OpRewritePattern<tensor::PadOp> {
+  using OpRewritePattern<tensor::PadOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(tensor::PadOp padOp,
+                                PatternRewriter &rewriter) const override {
+    if (padOp.getNofold()) {
+      return rewriter.notifyMatchFailure(padOp, "skipping unfoldable pad");
+    }
+
+    ReifiedRankedShapedTypeDims reifiedResultShapes;
+    if (failed(reifyResultShapes(rewriter, padOp, reifiedResultShapes)))
+      return failure();
+
+    SmallVector<int64_t> newShape;
+    for (const auto &[s, ofr] : llvm::zip_equal(
+             padOp.getResultType().getShape(), reifiedResultShapes.front())) {
+      std::optional<int64_t> maybeCst = getConstantIntValue(ofr);
+      // Reification does not add static information, just use existing shape.
+      if (!maybeCst.has_value()) {
+        newShape.push_back(s);
+        continue;
+      }
+      int64_t cst = *maybeCst;
+      assert((ShapedType::isDynamic(s) || s == cst) && "constants must agree!");
+      newShape.push_back(cst);
+    }
+    if (newShape == padOp.getResultType().getShape())
+      return failure();
+
+    Type oldType = padOp.getResultType();
+    Type newType =
+        RankedTensorType::Builder(padOp.getResultType()).setShape(newShape);
+    Operation *newPad = rewriter.clone(*padOp);
+    newPad->getResult(0).setType(newType);
+    rewriter.replaceOpWithNewOp<tensor::CastOp>(padOp, oldType,
+                                                newPad->getResult(0));
+    return success();
+  }
+};
+
 } // namespace
 
+LogicalResult
+PadOp::reifyResultShapes(OpBuilder &b,
+                         ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
+  reifiedReturnShapes.resize(1, SmallVector<OpFoldResult>(getType().getRank()));
+  SmallVector<OpFoldResult> lp = getMixedLowPad();
+  SmallVector<OpFoldResult> hp = getMixedHighPad();
+  for (int64_t i = 0; i < getResultType().getRank(); ++i) {
+    if (!getType().isDynamicDim(i)) {
+      reifiedReturnShapes[0][i] = b.getIndexAttr(getType().getDimSize(i));
+      continue;
+    }
+    Location loc = getLoc();
+    Value dim = b.createOrFold<tensor::DimOp>(
+        loc, getSource(), b.create<arith::ConstantIndexOp>(loc, i));
+
+    AffineExpr d0, d1, d2;
+    bindDims(b.getContext(), d0, d1, d2);
+    reifiedReturnShapes[0][i] = affine::makeComposedFoldedAffineApply(
+        b, loc, {d0 + d1 + d2}, {dim, lp[i], hp[i]});
+  }
+  return success();
+}
+
 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
   results.add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast,
               FoldOrthogonalPaddings, FoldStaticPadding,
-              FoldConsecutiveConstantPadding>(context);
+              FoldConsecutiveConstantPadding, FoldReifiedShape>(context);
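
[Editor's note: for a dynamic dimension, the reifyResultShapes implementation above
materializes size = dim + low + high via makeComposedFoldedAffineApply. A minimal
sketch of the emitted IR, not taken from the PR, assuming a 1-D pad with a dynamic
low pad %lo and a constant high pad of 4 (all value names hypothetical):

  %c0 = arith.constant 0 : index
  %dim = tensor.dim %src, %c0 : tensor<?xf32>
  %sz = affine.apply affine_map<(d0, d1) -> (d0 + d1 + 4)>(%dim, %lo)

The constant high pad folds into the affine map; if %dim and %lo also fold to
constants, the reified size comes back as an attribute, which is what lets
FoldReifiedShape tighten the result type.]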
----------------
fabianmcg wrote:
Tomorrow I'll take a look at whether I can augment the pass to get the desired behavior, because I agree this doesn't really fit as a canonicalization. The main thing we want from this transform is that, in some circumstances, tiling followed by this rewrite yields static shapes; this is shown in the test.
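
[Editor's note: a hedged before/after sketch of that tiling scenario, not the PR's
actual test; %slice and the reified constant 11 are assumptions, e.g. a tile size
that affine composition resolves to a constant:

  %cst = arith.constant 0.0 : f32
  // Before: constant pads, but the result type is still dynamic.
  %padded = tensor.pad %slice low[2] high[3] {
  ^bb0(%i: index):
    tensor.yield %cst : f32
  } : tensor<?xf32> to tensor<?xf32>

  // After FoldReifiedShape, if tensor.dim %slice reifies to 11 (11 + 2 + 3 = 16);
  // the cast takes over the uses of the original %padded.
  %static = tensor.pad %slice low[2] high[3] {
  ^bb0(%i: index):
    tensor.yield %cst : f32
  } : tensor<?xf32> to tensor<16xf32>
  %padded = tensor.cast %static : tensor<16xf32> to tensor<?xf32>]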
https://github.com/llvm/llvm-project/pull/145732