[Mlir-commits] [mlir] [mlir][vector] transpose(broadcast) -> broadcast canonicalization (PR #135096)
James Newling
llvmlistbot at llvm.org
Thu Apr 10 11:04:32 PDT 2025
================
@@ -6155,12 +6155,115 @@ class FoldTransposeCreateMask final : public OpRewritePattern<TransposeOp> {
}
};
+/// Folds transpose(broadcast(x)) to broadcast(x) if the transpose is
+/// 'order preserving', where 'order preserving' means the flattened
+/// inputs and outputs of the transpose have identical (numerical) values.
+///
+/// Example:
+/// ```
+/// %0 = vector.broadcast %input : vector<1x1xi32> to vector<1x8xi32>
+/// %1 = vector.transpose %0, [1, 0] : vector<1x8xi32>
+/// to vector<8x1xi32>
+/// ```
+/// can be rewritten as the equivalent
+/// ```
+/// %0 = vector.broadcast %input : vector<1x1xi32> to vector<8x1xi32>
+/// ```
+/// The algorithm works by partitioning dimensions into groups that can be
+/// locally permuted while preserving order, and checking that the transpose
+/// only permutes within these groups.
+class FoldTransposeBroadcast : public OpRewritePattern<vector::TransposeOp> {
+public:
+ using OpRewritePattern::OpRewritePattern;
+ FoldTransposeBroadcast(MLIRContext *context, PatternBenefit benefit = 1)
+ : OpRewritePattern<vector::TransposeOp>(context, benefit) {}
+
+ static bool canFoldIntoPrecedingBroadcast(vector::TransposeOp transpose) {
+
+ vector::BroadcastOp broadcast =
+ transpose.getVector().getDefiningOp<vector::BroadcastOp>();
+ if (!broadcast)
+ return false;
+
+ auto inputType = dyn_cast<VectorType>(broadcast.getSourceType());
+ bool inputIsScalar = !inputType;
+
+ // transpose(broadcast(scalar)) -> broadcast(scalar) is always valid. Check
+ // this before querying inputType, which is null for a scalar source.
+ if (inputIsScalar)
+ return true;
+
+ ArrayRef<int64_t> inputShape = inputType.getShape();
+ int64_t inputRank = inputType.getRank();
+ int64_t outputRank = transpose.getType().getRank();
+ int64_t deltaRank = outputRank - inputRank;
+
+ // Return true if all permutation destinations for indices in [low, high)
+ // are in [low, high), so the permutation is local to the group.
+ auto isGroupBound = [&](int low, int high) {
+ ArrayRef<int64_t> permutation = transpose.getPermutation();
+ for (int j = low; j < high; ++j) {
+ if (permutation[j] < low || permutation[j] >= high) {
+ return false;
+ }
+ }
+ return true;
+ };
+
+ // Groups are either contiguous sequences of 1s, or non-1s (1-element
+ // groups). Consider broadcasting 4x1x1x7 to 2x3x4x5x6x7. This is equivalent
+ // to broadcasting from 1x1x4x1x1x7.
+ //                      ^^^ ^ ^^^ ^
+ //              groups:  0  1  2  3
+ // Order preserving permutations for this example are ones that only permute
+ // within the groups [0,1] and [3,4], like (1 0 2 4 3 5).
+ int low = 0;
+ for (int inputIndex = 0; inputIndex < inputRank; ++inputIndex) {
+ bool notOne = inputShape[inputIndex] != 1;
+ bool prevNotOne = (inputIndex != 0 && inputShape[inputIndex - 1] != 1);
+ bool groupEndFound = notOne || prevNotOne;
+ if (groupEndFound) {
+ int high = inputIndex + deltaRank;
+ if (!isGroupBound(low, high)) {
+ return false;
+ }
+ low = high;
+ }
+ }
+ if (!isGroupBound(low, outputRank)) {
+ return false;
+ }
+
+ // The preceding logic ensures that by this point, the output of the
+ // transpose is definitely broadcastable from the input shape. So we don't
+ // need to call 'vector::isBroadcastableTo', but asserting here just as a
+ // sanity check:
+ bool isBroadcastable =
+ vector::isBroadcastableTo(inputType, transpose.getResultVectorType()) ==
+ vector::BroadcastableToResult::Success;
+ assert(isBroadcastable &&
+ "result of transpose must be broadcastable from the broadcast source");
+
+ return true;
+ }
+
+ LogicalResult matchAndRewrite(vector::TransposeOp transpose,
+ PatternRewriter &rewriter) const override {
+ if (!canFoldIntoPrecedingBroadcast(transpose))
+ return failure();
+
+ rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
+ transpose, transpose.getResultVectorType(), transpose.getVector());
+
+ return success();
+ }
+};
+
} // namespace
void vector::TransposeOp::getCanonicalizationPatterns(
RewritePatternSet &results, MLIRContext *context) {
results.add<FoldTransposeCreateMask, FoldTransposedScalarBroadcast,
- TransposeFolder, FoldTransposeSplat>(context);
+ TransposeFolder, FoldTransposeSplat, FoldTransposeBroadcast>(
+ context);
----------------
newling wrote:
I think so. The new broadcast is the same rank and 'volume' as the old one, so I can't think of a sense in which it'll be more complex. So the removal of the transpose clinches it in my mind!
As an aside: it would be nice if MLIR/dialects defined an 'energy function' specifying what qualifies as a canonicalization (i.e. something to guarantee that every rewrite takes us closer to a fixed point, the energy minimum).
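To make that concrete, here is the example from the pattern's doc comment written out as before/after IR (just a sketch for illustration, not a test from this PR):

```mlir
// Before: a rank-2, 8-element broadcast followed by a transpose.
%0 = vector.broadcast %input : vector<1x1xi32> to vector<1x8xi32>
%1 = vector.transpose %0, [1, 0] : vector<1x8xi32> to vector<8x1xi32>

// After: still a rank-2, 8-element broadcast, and the transpose is gone.
%2 = vector.broadcast %input : vector<1x1xi32> to vector<8x1xi32>
```

Both sides produce the same 8 values in the same flattened order, which is exactly the 'order preserving' condition the pattern checks.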
https://github.com/llvm/llvm-project/pull/135096