[Mlir-commits] [mlir] [mlir][x86vector] Sink Vector.transfer_reads and vector.load before the consumer (PR #169333)
Arun Thangamani
llvmlistbot at llvm.org
Mon Nov 24 06:36:26 PST 2025
https://github.com/arun-thmn created https://github.com/llvm/llvm-project/pull/169333
Adds a pattern that sinks vector producer ops (`vector.load` and `vector.transfer_read`) forward in a block to their earliest legal use, reducing live ranges and improving scheduling opportunities.
>From 95260b8b1a22adfe4f0412f67b8fedea2d200294 Mon Sep 17 00:00:00 2001
From: Arun Thangamani <arun.thangamani at intel.com>
Date: Mon, 24 Nov 2025 06:32:47 -0800
Subject: [PATCH] sink vector transfer reads and loads before the consumer
---
.../TransformOps/X86VectorTransformOps.td | 11 ++
.../mlir/Dialect/X86Vector/Transforms.h | 4 +
.../TransformOps/X86VectorTransformOps.cpp | 5 +
.../X86Vector/Transforms/CMakeLists.txt | 1 +
.../Transforms/SinkVectorProducerOps.cpp | 87 ++++++++++
.../X86Vector/sink-vector-producer-ops.mlir | 154 ++++++++++++++++++
6 files changed, 262 insertions(+)
create mode 100644 mlir/lib/Dialect/X86Vector/Transforms/SinkVectorProducerOps.cpp
create mode 100644 mlir/test/Dialect/X86Vector/sink-vector-producer-ops.mlir
diff --git a/mlir/include/mlir/Dialect/X86Vector/TransformOps/X86VectorTransformOps.td b/mlir/include/mlir/Dialect/X86Vector/TransformOps/X86VectorTransformOps.td
index 3c5294ff14fc7..12ba5e9f11141 100644
--- a/mlir/include/mlir/Dialect/X86Vector/TransformOps/X86VectorTransformOps.td
+++ b/mlir/include/mlir/Dialect/X86Vector/TransformOps/X86VectorTransformOps.td
@@ -38,6 +38,17 @@ def ApplyVectorContractToPackedTypeDotProductPatternsOp : Op<Transform_Dialect,
let assemblyFormat = "attr-dict";
}
+def ApplySinkVectorProducerOpsPatternsOp : Op<Transform_Dialect,
+ "apply_patterns.x86vector.sink_vector_producer_ops",
+ [DeclareOpInterfaceMethods<PatternDescriptorOpInterface>]> {
+ let description = [{
+ Collect patterns to sink vector producer operations forward in a block to
+ place them immediately before their first use.
+ }];
+
+ let assemblyFormat = "attr-dict";
+}
+
#endif // X86VECTOR_TRANSFORM_OPS
diff --git a/mlir/include/mlir/Dialect/X86Vector/Transforms.h b/mlir/include/mlir/Dialect/X86Vector/Transforms.h
index fc46dff63c2b7..b9c9054f57890 100644
--- a/mlir/include/mlir/Dialect/X86Vector/Transforms.h
+++ b/mlir/include/mlir/Dialect/X86Vector/Transforms.h
@@ -91,6 +91,10 @@ void populateVectorContractToFMAPatterns(RewritePatternSet &patterns);
void populateVectorContractToPackedTypeDotProductPatterns(
RewritePatternSet &patterns);
+/// Performs forward scheduling of vector producer ops to minimize their live
+/// range by placing them at their earliest legal use site.
+void populateSinkVectorProducerOpsPatterns(RewritePatternSet &patterns);
+
//===----------------------------------------------------------------------===//
/// Helpers extracted from:
/// - clang/lib/Headers/avxintrin.h
diff --git a/mlir/lib/Dialect/X86Vector/TransformOps/X86VectorTransformOps.cpp b/mlir/lib/Dialect/X86Vector/TransformOps/X86VectorTransformOps.cpp
index 95db208207672..25772f2aa57f4 100644
--- a/mlir/lib/Dialect/X86Vector/TransformOps/X86VectorTransformOps.cpp
+++ b/mlir/lib/Dialect/X86Vector/TransformOps/X86VectorTransformOps.cpp
@@ -32,6 +32,11 @@ void mlir::transform::ApplyVectorContractToPackedTypeDotProductPatternsOp::
x86vector::populateVectorContractToPackedTypeDotProductPatterns(patterns);
}
+// Transform-dialect hook: installs the producer-sinking rewrite patterns.
+void mlir::transform::ApplySinkVectorProducerOpsPatternsOp::populatePatterns(
+    RewritePatternSet &patterns) {
+  x86vector::populateSinkVectorProducerOpsPatterns(patterns);
+}
+
//===----------------------------------------------------------------------===//
// Transform op registration
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/X86Vector/Transforms/CMakeLists.txt b/mlir/lib/Dialect/X86Vector/Transforms/CMakeLists.txt
index 2cab50fb591c4..cc4d3cac0f7ea 100644
--- a/mlir/lib/Dialect/X86Vector/Transforms/CMakeLists.txt
+++ b/mlir/lib/Dialect/X86Vector/Transforms/CMakeLists.txt
@@ -3,6 +3,7 @@ add_mlir_dialect_library(MLIRX86VectorTransforms
LegalizeForLLVMExport.cpp
VectorContractToFMA.cpp
VectorContractToPackedTypeDotProduct.cpp
+ SinkVectorProducerOps.cpp
LINK_LIBS PUBLIC
MLIRArithDialect
diff --git a/mlir/lib/Dialect/X86Vector/Transforms/SinkVectorProducerOps.cpp b/mlir/lib/Dialect/X86Vector/Transforms/SinkVectorProducerOps.cpp
new file mode 100644
index 0000000000000..85cb18687a4fc
--- /dev/null
+++ b/mlir/lib/Dialect/X86Vector/Transforms/SinkVectorProducerOps.cpp
@@ -0,0 +1,87 @@
+//===- SinkVectorProducerOps.cpp ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Vector/IR/VectorOps.h"
+#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
+#include "mlir/Dialect/X86Vector/Transforms.h"
+#include "mlir/Dialect/X86Vector/X86VectorDialect.h"
+
+#include "mlir/IR/BuiltinAttributes.h"
+#include "mlir/IR/Dominance.h"
+#include "mlir/IR/PatternMatch.h"
+
+#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
+
+using namespace mlir;
+using namespace mlir::vector;
+using namespace mlir::x86vector;
+
+/// Sinks a vector producer op (such as vector.load or vector.transfer_read)
+/// forward to the position immediately before its first in-block user. This
+/// shrinks the producer's live range and improves scheduling opportunities.
+template <typename ProducerOp>
+struct SinkVectorProducerOps final : public OpRewritePattern<ProducerOp> {
+  using OpRewritePattern<ProducerOp>::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(ProducerOp op,
+                                PatternRewriter &rewriter) const override {
+
+    // Collect all users of the producer op.
+    llvm::SmallVector<Operation *> users;
+    for (OpResult result : op->getResults())
+      for (Operation *user : result.getUsers())
+        users.push_back(user);
+
+    // If there are no users, nothing to sink.
+    if (users.empty())
+      return failure();
+
+    // If there is no next op (op is last in its block, with all users in
+    // other blocks), or the next op is already a user, do not move.
+    Operation *nextOp = op->getNextNode();
+    if (!nextOp || llvm::is_contained(users, nextOp))
+      return failure();
+
+    // Prevent pathological looping:
+    // If the next op produces values used by any of op's users, don't move.
+    llvm::SmallVector<Operation *> nextOpUsers;
+    for (OpResult result : nextOp->getResults())
+      for (Operation *user : result.getUsers())
+        nextOpUsers.push_back(user);
+    if (llvm::any_of(users, [&](Operation *x) {
+          return llvm::is_contained(nextOpUsers, x);
+        })) {
+      return failure();
+    }
+
+    // Find the nearest user by scanning forward in the block.
+    while (nextOp && !llvm::is_contained(users, nextOp))
+      nextOp = nextOp->getNextNode();
+
+    // All users live in other blocks; no in-block position to sink to.
+    if (!nextOp)
+      return failure();
+
+    // Both ops must be in the same block to safely move.
+    if (op->getBlock() != nextOp->getBlock())
+      return failure();
+
+    // Move the producer via the rewriter so the greedy pattern driver is
+    // notified of the IR mutation and keeps its worklist consistent.
+    rewriter.moveOpBefore(op, nextOp);
+
+    return success();
+  }
+};
+
+void x86vector::populateSinkVectorProducerOpsPatterns(
+    RewritePatternSet &patterns) {
+  patterns.add<SinkVectorProducerOps<TransferReadOp>,
+               SinkVectorProducerOps<LoadOp>>(patterns.getContext());
+}
diff --git a/mlir/test/Dialect/X86Vector/sink-vector-producer-ops.mlir b/mlir/test/Dialect/X86Vector/sink-vector-producer-ops.mlir
new file mode 100644
index 0000000000000..04045b05bda49
--- /dev/null
+++ b/mlir/test/Dialect/X86Vector/sink-vector-producer-ops.mlir
@@ -0,0 +1,154 @@
+// RUN: mlir-opt %s -transform-interpreter -cse -split-input-file | FileCheck %s
+
+func.func @sink_vector_loads(%arg0: memref<16x16xf32>, %arg1: vector<8xf32>) -> vector<8xf32> {
+ %c0 = arith.constant 0 : index
+ %c8 = arith.constant 8 : index
+ %0 = vector.load %arg0[%c0, %c0] : memref<16x16xf32>, vector<8xf32>
+ %1 = vector.load %arg0[%c0, %c8] : memref<16x16xf32>, vector<8xf32>
+ %2 = vector.load %arg0[%c8, %c0] : memref<16x16xf32>, vector<8xf32>
+ %3 = vector.load %arg0[%c8, %c8] : memref<16x16xf32>, vector<8xf32>
+ %4 = vector.fma %0, %1, %arg1 : vector<8xf32>
+ %5 = vector.fma %2, %3, %4 : vector<8xf32>
+ return %5 : vector<8xf32>
+}
+
+// CHECK-LABEL: @sink_vector_loads
+// CHECK: vector.load
+// CHECK-NEXT: vector.load
+// CHECK-NEXT: vector.fma
+// CHECK-NEXT: vector.load
+// CHECK-NEXT: vector.load
+// CHECK-NEXT: vector.fma
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.x86vector.sink_vector_producer_ops
+ } : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @sink_vector_transfer_reads(%arg0: memref<16x16xf32>, %arg1: vector<8xf32>) -> vector<8xf32> {
+ %c0 = arith.constant 0 : index
+ %c8 = arith.constant 8 : index
+ %0 = ub.poison : f32
+ %1 = vector.transfer_read %arg0[%c0, %c0], %0 {in_bounds = [true]} : memref<16x16xf32>, vector<8xf32>
+ %2 = vector.transfer_read %arg0[%c0, %c8], %0 {in_bounds = [true]} : memref<16x16xf32>, vector<8xf32>
+ %3 = vector.transfer_read %arg0[%c8, %c0], %0 {in_bounds = [true]} : memref<16x16xf32>, vector<8xf32>
+ %4 = vector.transfer_read %arg0[%c8, %c8], %0 {in_bounds = [true]} : memref<16x16xf32>, vector<8xf32>
+ %5 = vector.fma %1, %2, %arg1 : vector<8xf32>
+ %6 = vector.fma %3, %4, %5 : vector<8xf32>
+ return %6 : vector<8xf32>
+}
+
+// CHECK-LABEL: @sink_vector_transfer_reads
+// CHECK: vector.transfer_read
+// CHECK-NEXT: vector.transfer_read
+// CHECK-NEXT: vector.fma
+// CHECK-NEXT: vector.transfer_read
+// CHECK-NEXT: vector.transfer_read
+// CHECK-NEXT: vector.fma
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.x86vector.sink_vector_producer_ops
+ } : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @sink_vector_transfer_reads_tensor(%arg0: tensor<16x16xf32>, %arg1: vector<8xf32>) -> vector<8xf32> {
+ %c0 = arith.constant 0 : index
+ %c8 = arith.constant 8 : index
+ %0 = ub.poison : f32
+ %1 = vector.transfer_read %arg0[%c0, %c0], %0 {in_bounds = [true]} : tensor<16x16xf32>, vector<8xf32>
+ %2 = vector.transfer_read %arg0[%c0, %c8], %0 {in_bounds = [true]} : tensor<16x16xf32>, vector<8xf32>
+ %3 = vector.transfer_read %arg0[%c8, %c0], %0 {in_bounds = [true]} : tensor<16x16xf32>, vector<8xf32>
+ %4 = vector.transfer_read %arg0[%c8, %c8], %0 {in_bounds = [true]} : tensor<16x16xf32>, vector<8xf32>
+ %5 = vector.fma %1, %2, %arg1 : vector<8xf32>
+ %6 = vector.fma %3, %4, %5 : vector<8xf32>
+ return %6 : vector<8xf32>
+}
+
+// CHECK-LABEL: @sink_vector_transfer_reads_tensor
+// CHECK: vector.transfer_read
+// CHECK-NEXT: vector.transfer_read
+// CHECK-NEXT: vector.fma
+// CHECK-NEXT: vector.transfer_read
+// CHECK-NEXT: vector.transfer_read
+// CHECK-NEXT: vector.fma
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.x86vector.sink_vector_producer_ops
+ } : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @negative_no_infinite_looping(%arg0: memref<16x16xf32>, %arg1: vector<8xf32>) -> vector<8xf32> {
+ %c0 = arith.constant 0 : index
+ %c8 = arith.constant 8 : index
+ %0 = vector.load %arg0[%c0, %c0] : memref<16x16xf32>, vector<8xf32>
+ %1 = vector.load %arg0[%c0, %c8] : memref<16x16xf32>, vector<8xf32>
+ %2 = vector.fma %0, %1, %arg1 : vector<8xf32>
+ return %2: vector<8xf32>
+}
+
+// CHECK-LABEL: @negative_no_infinite_looping
+// CHECK: vector.load
+// CHECK-NEXT: vector.load
+// CHECK-NEXT: vector.fma
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.x86vector.sink_vector_producer_ops
+ } : !transform.any_op
+ transform.yield
+ }
+}
+
+// -----
+
+func.func @negative_no_sink_outside_block(%arg0: memref<8x16xf32>, %arg1: i1) -> vector<8xf32> {
+ %c0 = arith.constant 0 : index
+ %c8 = arith.constant 8 : index
+ %0 = vector.load %arg0[%c0, %c0] : memref<8x16xf32>, vector<8xf32>
+ %1 = vector.load %arg0[%c0, %c8] : memref<8x16xf32>, vector<8xf32>
+ %2 = scf.if %arg1 -> (vector<8xf32>) {
+ scf.yield %0 : vector<8xf32>
+ } else {
+ scf.yield %1 : vector<8xf32>
+ }
+ return %2 : vector<8xf32>
+}
+
+// CHECK-LABEL: @negative_no_sink_outside_block
+// CHECK: vector.load
+// CHECK-NEXT: vector.load
+// CHECK-NEXT: scf.if
+// CHECK-NEXT: scf.yield
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match ops{["func.func"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+ transform.apply_patterns to %0 {
+ transform.apply_patterns.x86vector.sink_vector_producer_ops
+ } : !transform.any_op
+ transform.yield
+ }
+}
More information about the Mlir-commits
mailing list