[Mlir-commits] [mlir] d72e58e - [MLIR][LLVM] Propagate alignment attribute from memref to LLVM (#151380)
llvmlistbot at llvm.org
llvmlistbot at llvm.org
Tue Aug 5 09:07:01 PDT 2025
Author: Erick Ochoa Lopez
Date: 2025-08-05T12:06:57-04:00
New Revision: d72e58e422be747dfd1e42f8ad370420634190cb
URL: https://github.com/llvm/llvm-project/commit/d72e58e422be747dfd1e42f8ad370420634190cb
DIFF: https://github.com/llvm/llvm-project/commit/d72e58e422be747dfd1e42f8ad370420634190cb.diff
LOG: [MLIR][LLVM] Propagate alignment attribute from memref to LLVM (#151380)
Propagate alignment attribute from operations in the memref dialect to
the LLVM dialect.
Possible improvement: perhaps the alignment attribute in LLVM's store and
load operations should be confined/constrained to i64. I believe that
way one could avoid spelling out the type in the attribute dictionary,
i.e., from `{ alignment = 32 : i64 }` to `{ alignment = 32 }`.
Added:
Modified:
mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index dc2035b0700d0..d6bdd342c9045 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -895,8 +895,8 @@ struct LoadOpLowering : public LoadStoreOpLowering<memref::LoadOp> {
adaptor.getMemref(),
adaptor.getIndices(), kNoWrapFlags);
rewriter.replaceOpWithNewOp<LLVM::LoadOp>(
- loadOp, typeConverter->convertType(type.getElementType()), dataPtr, 0,
- false, loadOp.getNontemporal());
+ loadOp, typeConverter->convertType(type.getElementType()), dataPtr,
+ loadOp.getAlignment().value_or(0), false, loadOp.getNontemporal());
return success();
}
};
@@ -918,7 +918,8 @@ struct StoreOpLowering : public LoadStoreOpLowering<memref::StoreOp> {
getStridedElementPtr(rewriter, op.getLoc(), type, adaptor.getMemref(),
adaptor.getIndices(), kNoWrapFlags);
rewriter.replaceOpWithNewOp<LLVM::StoreOp>(op, adaptor.getValue(), dataPtr,
- 0, false, op.getNontemporal());
+ op.getAlignment().value_or(0),
+ false, op.getNontemporal());
return success();
}
};
diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
index 51d56389dac9e..ad9d649590e66 100644
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -753,6 +753,17 @@ func.func @load_non_temporal(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>>) {
// -----
+// CHECK-LABEL: func @load_with_alignment(
+// CHECK-INTERFACE-LABEL: func @load_with_alignment(
+func.func @load_with_alignment(%arg0 : memref<32xf32>, %arg1 : index) {
+ // CHECK: llvm.load %{{.*}} {alignment = 32 : i64} : !llvm.ptr -> f32
+ // CHECK-INTERFACE: llvm.load
+ %1 = memref.load %arg0[%arg1] {alignment = 32} : memref<32xf32>
+ func.return
+}
+
+// -----
+
// CHECK-LABEL: func @store_non_temporal(
// CHECK-INTERFACE-LABEL: func @store_non_temporal(
func.func @store_non_temporal(%input : memref<32xf32, affine_map<(d0) -> (d0)>>, %output : memref<32xf32, affine_map<(d0) -> (d0)>>) {
@@ -766,6 +777,17 @@ func.func @store_non_temporal(%input : memref<32xf32, affine_map<(d0) -> (d0)>>,
// -----
+// CHECK-LABEL: func @store_with_alignment(
+// CHECK-INTERFACE-LABEL: func @store_with_alignment(
+func.func @store_with_alignment(%arg0 : memref<32xf32>, %arg1 : f32, %arg2 : index) {
+ // CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 32 : i64} : f32, !llvm.ptr
+ // CHECK-INTERFACE: llvm.store
+ memref.store %arg1, %arg0[%arg2] {alignment = 32} : memref<32xf32>
+ func.return
+}
+
+// -----
+
// Ensure unconvertable memory space not cause a crash
// CHECK-LABEL: @alloca_unconvertable_memory_space
More information about the Mlir-commits
mailing list