[Mlir-commits] [mlir] [mlir][vector] Add alignment attribute to `maskedload` and `maskedstore` (PR #151690)

Erick Ochoa Lopez llvmlistbot at llvm.org
Thu Aug 7 11:57:54 PDT 2025


https://github.com/amd-eochoalo updated https://github.com/llvm/llvm-project/pull/151690

>From 96ba831a63cc5534055f3a9ae71cad2dfa8a894b Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Wed, 30 Jul 2025 16:19:55 -0400
Subject: [PATCH 01/21] [mlir][vector] Add alignment to maskedload.

---
 .../mlir/Dialect/Vector/IR/VectorOps.td       | 33 ++++++++++++++++++-
 mlir/test/Dialect/Vector/invalid.mlir         | 22 +++++++++++++
 2 files changed, 54 insertions(+), 1 deletion(-)

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 3885439e11f89..60072f9496c9a 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1876,7 +1876,9 @@ def Vector_MaskedLoadOp :
     Arguments<(ins Arg<AnyMemRef, "", [MemRead]>:$base,
                Variadic<Index>:$indices,
                VectorOfNonZeroRankOf<[I1]>:$mask,
-               AnyVectorOfNonZeroRank:$pass_thru)>,
+               AnyVectorOfNonZeroRank:$pass_thru,
+               ConfinedAttr<OptionalAttr<I64Attr>,
+                   [AllAttrOf<[IntPositive, IntPowerOf2]>]>:$alignment)>,
     Results<(outs AnyVectorOfNonZeroRank:$result)> {
 
   let summary = "loads elements from memory into a vector as defined by a mask vector";
@@ -1912,6 +1914,12 @@ def Vector_MaskedLoadOp :
     %1 = vector.maskedload %base[%i, %j], %mask, %pass_thru
        : memref<?x?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
     ```
+
+    An optional `alignment` attribute allows one to specify the byte alignment
+    of the load operation. It must be a positive power of 2. The operation must
+    access memory at an address aligned to this boundary. Violations may lead
+    to architecture-specific faults or performance penalties. If the attribute
+    is not set, no specific alignment is required.
   }];
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
@@ -1932,6 +1940,29 @@ def Vector_MaskedLoadOp :
   let hasCanonicalizer = 1;
   let hasFolder = 1;
   let hasVerifier = 1;
+
+  let builders = [
+    OpBuilder<(ins "VectorType":$resultType,
+                   "Value":$base,
+                   "ValueRange":$indices,
+                   "Value":$mask,
+                   "Value":$passthrough,
+                   CArg<"uint64_t", "0">:$alignment), [{
+      return build($_builder, $_state, resultType, base, indices, mask, passthrough,
+                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                                    nullptr);
+    }]>,
+    OpBuilder<(ins "TypeRange":$resultTypes,
+                   "Value":$base,
+                   "ValueRange":$indices,
+                   "Value":$mask,
+                   "Value":$passthrough,
+                   CArg<"uint64_t", "0">:$alignment), [{
+      return build($_builder, $_state, resultTypes, base, indices, mask, passthrough,
+                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                                    nullptr);
+    }]>
+  ];
 }
 
 def Vector_MaskedStoreOp :
diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index c21de562d05e1..2efc382ada247 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1305,6 +1305,28 @@ func.func @store_memref_index_mismatch(%base : memref<?xf32>, %value : vector<16
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.maskedload
+//===----------------------------------------------------------------------===//
+
+func.func @maskedload_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>) {
+  %c0 = arith.constant 0 : index
+  // expected-error at +1 {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+  %val = vector.maskedload %base[%c0], %mask, %pass { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  return
+}
+
+// -----
+
+func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>) {
+  %c0 = arith.constant 0 : index
+  // expected-error at +1 {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+  %val = vector.maskedload %base[%c0], %mask, %pass { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  return
+}
+
+// -----
+
 func.func @maskedload_base_type_mismatch(%base: memref<?xf64>, %mask: vector<16xi1>, %pass: vector<16xf32>) {
   %c0 = arith.constant 0 : index
   // expected-error at +1 {{'vector.maskedload' op base and result element type should match}}

>From 9958c249dbc597558bdc16bc9ab882a654f1a22f Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Thu, 31 Jul 2025 16:04:24 -0400
Subject: [PATCH 02/21] [mlir][vector] Add alignment to maskedstore.

---
 .../mlir/Dialect/Vector/IR/VectorOps.td       | 22 ++++++++++++++++++-
 mlir/test/Dialect/Vector/invalid.mlir         | 22 +++++++++++++++++++
 2 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 60072f9496c9a..923214e949f03 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1970,7 +1970,9 @@ def Vector_MaskedStoreOp :
     Arguments<(ins Arg<AnyMemRef, "", [MemWrite]>:$base,
                Variadic<Index>:$indices,
                VectorOfNonZeroRankOf<[I1]>:$mask,
-               AnyVectorOfNonZeroRank:$valueToStore)> {
+               AnyVectorOfNonZeroRank:$valueToStore,
+               ConfinedAttr<OptionalAttr<I64Attr>,
+                   [AllAttrOf<[IntPositive, IntPowerOf2]>]>:$alignment)> {
 
   let summary = "stores elements from a vector into memory as defined by a mask vector";
 
@@ -2005,6 +2007,12 @@ def Vector_MaskedStoreOp :
     vector.maskedstore %base[%i, %j], %mask, %value
       : memref<?x?xf32>, vector<16xi1>, vector<16xf32>
     ```
+
+    An optional `alignment` attribute allows one to specify the byte alignment
+    of the store operation. It must be a positive power of 2. The operation must
+    access memory at an address aligned to this boundary. Violations may lead
+    to architecture-specific faults or performance penalties. If the attribute
+    is not set, no specific alignment is required.
   }];
   let extraClassDeclaration = [{
     MemRefType getMemRefType() {
@@ -2023,6 +2031,18 @@ def Vector_MaskedStoreOp :
   let hasCanonicalizer = 1;
   let hasFolder = 1;
   let hasVerifier = 1;
+
+  let builders = [
+    OpBuilder<(ins "Value":$base,
+                   "ValueRange":$indices,
+                   "Value":$mask,
+                   "Value":$valueToStore,
+                   CArg<"uint64_t", "0">:$alignment), [{
+      return build($_builder, $_state, base, indices, mask, valueToStore,
+                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                                    nullptr);
+    }]>
+  ];
 }
 
 def Vector_GatherOp :
diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index 2efc382ada247..b20db0976808c 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1358,6 +1358,28 @@ func.func @maskedload_memref_mismatch(%base: memref<?xf32>, %mask: vector<16xi1>
 
 // -----
 
+//===----------------------------------------------------------------------===//
+// vector.maskedstore
+//===----------------------------------------------------------------------===//
+
+func.func @maskedstore_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>) {
+  %c0 = arith.constant 0 : index
+  // expected-error at +1 {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+  vector.maskedstore %base[%c0], %mask, %value { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
+  return
+}
+
+// -----
+
+func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>) {
+  %c0 = arith.constant 0 : index
+  // expected-error at +1 {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+  vector.maskedstore %base[%c0], %mask, %value { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
+  return
+}
+
+// -----
+
 func.func @maskedstore_base_type_mismatch(%base: memref<?xf64>, %mask: vector<16xi1>, %value: vector<16xf32>) {
   %c0 = arith.constant 0 : index
   // expected-error at +1 {{'vector.maskedstore' op base and valueToStore element type should match}}

>From b2f70ffe314075456774e365ea5f1a088b1ebfb1 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Thu, 31 Jul 2025 16:20:58 -0400
Subject: [PATCH 03/21] [mlir] Use alignment in VectorToLLVM conversion.

After adding an alignment attribute to:
* vector.load
* vector.store
* vector.maskedload
* vector.maskedstore

The shared pattern used by these operations can use the alignment
attribute to specify the alignment of the vector being loaded or stored.
---
 .../VectorToLLVM/ConvertVectorToLLVM.cpp      |  4 +-
 .../vector-to-llvm-interface.mlir             | 58 +++++++++++++++++++
 2 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 17a79e3815b97..54bf31c379aa0 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -247,8 +247,8 @@ class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
     MemRefType memRefTy = loadOrStoreOp.getMemRefType();
 
     // Resolve alignment.
-    unsigned align;
-    if (failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vectorTy,
+    unsigned align = loadOrStoreOp.getAlignment().value_or(0);
+    if (!align && failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vectorTy,
                                         memRefTy, align, useVectorAlignment)))
       return rewriter.notifyMatchFailure(loadOrStoreOp,
                                          "could not resolve alignment");
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
index 31e17fb3e3cc6..b716a5b784ea2 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
@@ -1679,6 +1679,20 @@ func.func @load_0d(%memref : memref<200x100xf32>, %i : index, %j : index) -> vec
 
 // -----
 
+func.func @load_alignment(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
+  %0 = vector.load %memref[%i, %j] { alignment = 8 } : memref<200x100xf32>, vector<8xf32>
+  return %0 : vector<8xf32>
+}
+
+// CHECK-LABEL: func @load_alignment
+// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64
+// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]]  : i64
+// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}}  : i64
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+// CHECK: llvm.load %[[GEP]] {alignment = 8 : i64} : !llvm.ptr -> vector<8xf32>
+
+// -----
+
 //===----------------------------------------------------------------------===//
 // vector.store
 //===----------------------------------------------------------------------===//
@@ -1785,6 +1799,21 @@ func.func @store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) {
 
 // -----
 
+func.func @store_alignment(%memref : memref<200x100xf32>, %i : index, %j : index) {
+  %val = arith.constant dense<11.0> : vector<4xf32>
+  vector.store %val, %memref[%i, %j] {alignment = 8} : memref<200x100xf32>, vector<4xf32>
+  return
+}
+
+// CHECK-LABEL: func @store_alignment
+// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64
+// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]]  : i64
+// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}}  : i64
+// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 8 : i64} :  vector<4xf32>, !llvm.ptr
+
+// -----
+
 //===----------------------------------------------------------------------===//
 // vector.maskedload
 //===----------------------------------------------------------------------===//
@@ -1839,6 +1868,21 @@ func.func @masked_load_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16]
 
 // -----
 
+func.func @masked_load_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> {
+  %c0 = arith.constant 0: index
+  %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 { alignment = 8 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  return %0 : vector<16xf32>
+}
+
+// CHECK-LABEL: func @masked_load
+// CHECK: %[[CO:.*]] = arith.constant 0 : index
+// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[P]], %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32>
+// CHECK: return %[[L]] : vector<16xf32>
+
+// -----
+
 //===----------------------------------------------------------------------===//
 // vector.maskedstore
 //===----------------------------------------------------------------------===//
@@ -1891,6 +1935,20 @@ func.func @masked_store_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16
 
 // -----
 
+func.func @masked_store_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
+  %c0 = arith.constant 0: index
+  vector.maskedstore %arg0[%c0], %arg1, %arg2 { alignment = 8 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
+  return
+}
+
+// CHECK-LABEL: func @masked_store
+// CHECK: %[[CO:.*]] = arith.constant 0 : index
+// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
+// CHECK: llvm.intr.masked.store %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr
+
+// -----
+
 //===----------------------------------------------------------------------===//
 // vector.gather
 //===----------------------------------------------------------------------===//

>From 0eb1b442055ebbeaad01d05c7fe24bc97c8bbdfb Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 09:48:06 -0400
Subject: [PATCH 04/21] Remove unnecessary checks in test

---
 .../vector-to-llvm-interface.mlir             | 31 +++++--------------
 1 file changed, 8 insertions(+), 23 deletions(-)

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
index b716a5b784ea2..940f3211f7c25 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
@@ -1685,11 +1685,7 @@ func.func @load_alignment(%memref : memref<200x100xf32>, %i : index, %j : index)
 }
 
 // CHECK-LABEL: func @load_alignment
-// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64
-// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]]  : i64
-// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}}  : i64
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.load %[[GEP]] {alignment = 8 : i64} : !llvm.ptr -> vector<8xf32>
+// CHECK: llvm.load {{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xf32>
 
 // -----
 
@@ -1806,11 +1802,7 @@ func.func @store_alignment(%memref : memref<200x100xf32>, %i : index, %j : index
 }
 
 // CHECK-LABEL: func @store_alignment
-// CHECK: %[[C100:.*]] = llvm.mlir.constant(100 : index) : i64
-// CHECK: %[[MUL:.*]] = llvm.mul %{{.*}}, %[[C100]]  : i64
-// CHECK: %[[ADD:.*]] = llvm.add %[[MUL]], %{{.*}}  : i64
-// CHECK: %[[GEP:.*]] = llvm.getelementptr %{{.*}}[%[[ADD]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.store %{{.*}}, %[[GEP]] {alignment = 8 : i64} :  vector<4xf32>, !llvm.ptr
+// CHECK: llvm.store %{{.*}} {alignment = 8 : i64} :  vector<4xf32>, !llvm.ptr
 
 // -----
 
@@ -1870,16 +1862,12 @@ func.func @masked_load_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16]
 
 func.func @masked_load_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> {
   %c0 = arith.constant 0: index
-  %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 { alignment = 8 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return %0 : vector<16xf32>
 }
 
-// CHECK-LABEL: func @masked_load
-// CHECK: %[[CO:.*]] = arith.constant 0 : index
-// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[P]], %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32>
-// CHECK: return %[[L]] : vector<16xf32>
+// CHECK-LABEL: func @masked_load_alignment
+// CHECK: llvm.intr.masked.load %{{.*}} {alignment = 2 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32>
 
 // -----
 
@@ -1937,15 +1925,12 @@ func.func @masked_store_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16
 
 func.func @masked_store_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
   %c0 = arith.constant 0: index
-  vector.maskedstore %arg0[%c0], %arg1, %arg2 { alignment = 8 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
+  vector.maskedstore %arg0[%c0], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
   return
 }
 
-// CHECK-LABEL: func @masked_store
-// CHECK: %[[CO:.*]] = arith.constant 0 : index
-// CHECK: %[[C:.*]] = builtin.unrealized_conversion_cast %[[CO]] : index to i64
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.intr.masked.store %{{.*}}, %[[P]], %{{.*}} {alignment = 8 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr
+// CHECK-LABEL: func @masked_store_alignment
+// CHECK: llvm.intr.masked.store %{{.*}} {alignment = 2 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr
 
 // -----
 

>From 56c23351e154e949ea2d046f1c1e72502d8a794b Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 09:53:06 -0400
Subject: [PATCH 05/21] Make operations parameters in test

---
 .../vector-to-llvm-interface.mlir             | 13 +++++-------
 mlir/test/Dialect/Vector/invalid.mlir         | 20 ++++++++-----------
 2 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
index 940f3211f7c25..b1fb669bc76ac 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
@@ -1795,8 +1795,7 @@ func.func @store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) {
 
 // -----
 
-func.func @store_alignment(%memref : memref<200x100xf32>, %i : index, %j : index) {
-  %val = arith.constant dense<11.0> : vector<4xf32>
+func.func @store_alignment(%memref : memref<200x100xf32>, %i : index, %j : index, %val : vector<4xf32>) {
   vector.store %val, %memref[%i, %j] {alignment = 8} : memref<200x100xf32>, vector<4xf32>
   return
 }
@@ -1860,9 +1859,8 @@ func.func @masked_load_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16]
 
 // -----
 
-func.func @masked_load_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> {
-  %c0 = arith.constant 0: index
-  %0 = vector.maskedload %arg0[%c0], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+func.func @masked_load_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>, %arg3: index) -> vector<16xf32> {
+  %0 = vector.maskedload %arg0[%arg3], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return %0 : vector<16xf32>
 }
 
@@ -1923,9 +1921,8 @@ func.func @masked_store_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16
 
 // -----
 
-func.func @masked_store_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
-  %c0 = arith.constant 0: index
-  vector.maskedstore %arg0[%c0], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
+func.func @masked_store_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>, %arg3: index) {
+  vector.maskedstore %arg0[%arg3], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
   return
 }
 
diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index b20db0976808c..ac48d67e34c0b 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1309,19 +1309,17 @@ func.func @store_memref_index_mismatch(%base : memref<?xf32>, %value : vector<16
 // vector.maskedload
 //===----------------------------------------------------------------------===//
 
-func.func @maskedload_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>) {
-  %c0 = arith.constant 0 : index
+func.func @maskedload_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>, %index: index) {
   // expected-error at +1 {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
-  %val = vector.maskedload %base[%c0], %mask, %pass { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  %val = vector.maskedload %base[%index], %mask, %pass { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return
 }
 
 // -----
 
-func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>) {
-  %c0 = arith.constant 0 : index
+func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>, %index: index) {
   // expected-error at +1 {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
-  %val = vector.maskedload %base[%c0], %mask, %pass { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  %val = vector.maskedload %base[%index], %mask, %pass { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return
 }
 
@@ -1362,19 +1360,17 @@ func.func @maskedload_memref_mismatch(%base: memref<?xf32>, %mask: vector<16xi1>
 // vector.maskedstore
 //===----------------------------------------------------------------------===//
 
-func.func @maskedstore_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>) {
-  %c0 = arith.constant 0 : index
+func.func @maskedstore_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>, %index: index) {
   // expected-error at +1 {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
-  vector.maskedstore %base[%c0], %mask, %value { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
+  vector.maskedstore %base[%index], %mask, %value { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
   return
 }
 
 // -----
 
-func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>) {
-  %c0 = arith.constant 0 : index
+func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>, %index: index) {
   // expected-error at +1 {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
-  vector.maskedstore %base[%c0], %mask, %value { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
+  vector.maskedstore %base[%index], %mask, %value { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
   return
 }
 

>From c0f3d68ba96228ae19275ca576c0839ffddb3609 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 09:57:30 -0400
Subject: [PATCH 06/21] Rename test functions to be consistent

---
 .../VectorToLLVM/vector-to-llvm-interface.mlir   | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
index b1fb669bc76ac..5a424a8ac0d5f 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm-interface.mlir
@@ -1679,12 +1679,12 @@ func.func @load_0d(%memref : memref<200x100xf32>, %i : index, %j : index) -> vec
 
 // -----
 
-func.func @load_alignment(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
+func.func @load_with_alignment(%memref : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
   %0 = vector.load %memref[%i, %j] { alignment = 8 } : memref<200x100xf32>, vector<8xf32>
   return %0 : vector<8xf32>
 }
 
-// CHECK-LABEL: func @load_alignment
+// CHECK-LABEL: func @load_with_alignment
 // CHECK: llvm.load {{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xf32>
 
 // -----
@@ -1795,12 +1795,12 @@ func.func @store_0d(%memref : memref<200x100xf32>, %i : index, %j : index) {
 
 // -----
 
-func.func @store_alignment(%memref : memref<200x100xf32>, %i : index, %j : index, %val : vector<4xf32>) {
+func.func @store_with_alignment(%memref : memref<200x100xf32>, %i : index, %j : index, %val : vector<4xf32>) {
   vector.store %val, %memref[%i, %j] {alignment = 8} : memref<200x100xf32>, vector<4xf32>
   return
 }
 
-// CHECK-LABEL: func @store_alignment
+// CHECK-LABEL: func @store_with_alignment
 // CHECK: llvm.store %{{.*}} {alignment = 8 : i64} :  vector<4xf32>, !llvm.ptr
 
 // -----
@@ -1859,12 +1859,12 @@ func.func @masked_load_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16]
 
 // -----
 
-func.func @masked_load_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>, %arg3: index) -> vector<16xf32> {
+func.func @masked_load_with_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>, %arg3: index) -> vector<16xf32> {
   %0 = vector.maskedload %arg0[%arg3], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return %0 : vector<16xf32>
 }
 
-// CHECK-LABEL: func @masked_load_alignment
+// CHECK-LABEL: func @masked_load_with_alignment
 // CHECK: llvm.intr.masked.load %{{.*}} {alignment = 2 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32>
 
 // -----
@@ -1921,12 +1921,12 @@ func.func @masked_store_index_scalable(%arg0: memref<?xindex>, %arg1: vector<[16
 
 // -----
 
-func.func @masked_store_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>, %arg3: index) {
+func.func @masked_store_with_alignment(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>, %arg3: index) {
   vector.maskedstore %arg0[%arg3], %arg1, %arg2 { alignment = 2 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
   return
 }
 
-// CHECK-LABEL: func @masked_store_alignment
+// CHECK-LABEL: func @masked_store_with_alignment
 // CHECK: llvm.intr.masked.store %{{.*}} {alignment = 2 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr
 
 // -----

>From bcdd88a7a44386c8c81158838a37555f9ca357f9 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 10:09:13 -0400
Subject: [PATCH 07/21] Change expected-error at +1 to expected-error at below

---
 mlir/test/Dialect/Vector/invalid.mlir | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index ac48d67e34c0b..a329b80c0b552 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1310,7 +1310,7 @@ func.func @store_memref_index_mismatch(%base : memref<?xf32>, %value : vector<16
 //===----------------------------------------------------------------------===//
 
 func.func @maskedload_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>, %index: index) {
-  // expected-error at +1 {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+  // expected-error at below {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
   %val = vector.maskedload %base[%index], %mask, %pass { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return
 }
@@ -1318,7 +1318,7 @@ func.func @maskedload_negative_alignment(%base: memref<?xf32>, %mask: vector<16x
 // -----
 
 func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>, %index: index) {
-  // expected-error at +1 {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+  // expected-error at below {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
   %val = vector.maskedload %base[%index], %mask, %pass { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return
 }
@@ -1361,7 +1361,7 @@ func.func @maskedload_memref_mismatch(%base: memref<?xf32>, %mask: vector<16xi1>
 //===----------------------------------------------------------------------===//
 
 func.func @maskedstore_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>, %index: index) {
-  // expected-error at +1 {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+  // expected-error at below {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
   vector.maskedstore %base[%index], %mask, %value { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32>
   return
 }
@@ -1369,7 +1369,7 @@ func.func @maskedstore_negative_alignment(%base: memref<?xf32>, %mask: vector<16
 // -----
 
 func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>, %index: index) {
-  // expected-error at +1 {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
+  // expected-error at below {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
   vector.maskedstore %base[%index], %mask, %value { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return
 }

>From 55c9c3f1ed9b40299f369594ad17436cc647b9a5 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 10:10:10 -0400
Subject: [PATCH 08/21] Change function name

---
 mlir/test/Dialect/Vector/invalid.mlir | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index a329b80c0b552..a493d96b749ed 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1368,7 +1368,7 @@ func.func @maskedstore_negative_alignment(%base: memref<?xf32>, %mask: vector<16
 
 // -----
 
-func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>, %index: index) {
+func.func @maskedstore_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>, %index: index) {
   // expected-error at below {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
   vector.maskedstore %base[%index], %mask, %value { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
   return

>From 3e796817e91e9651053008bba6dd88ea959e44d4 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 10:21:02 -0400
Subject: [PATCH 09/21] Simplify types in test

---
 mlir/test/Dialect/Vector/invalid.mlir | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index a493d96b749ed..213bc342f2d7e 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1309,17 +1309,17 @@ func.func @store_memref_index_mismatch(%base : memref<?xf32>, %value : vector<16
 // vector.maskedload
 //===----------------------------------------------------------------------===//
 
-func.func @maskedload_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>, %index: index) {
+func.func @maskedload_negative_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %pass: vector<1xi32>, %index: index) {
   // expected-error at below {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
-  %val = vector.maskedload %base[%index], %mask, %pass { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  %val = vector.maskedload %base[%index], %mask, %pass { alignment = -1 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
   return
 }
 
 // -----
 
-func.func @maskedload_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %pass: vector<16xf32>, %index: index) {
+func.func @maskedload_negative_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %pass: vector<1xi32>, %index: index) {
   // expected-error at below {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
-  %val = vector.maskedload %base[%index], %mask, %pass { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  %val = vector.maskedload %base[%index], %mask, %pass { alignment = 3 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
   return
 }
 
@@ -1360,17 +1360,17 @@ func.func @maskedload_memref_mismatch(%base: memref<?xf32>, %mask: vector<16xi1>
 // vector.maskedstore
 //===----------------------------------------------------------------------===//
 
-func.func @maskedstore_negative_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>, %index: index) {
+func.func @maskedstore_negative_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %value: vector<1xi32>, %index: index) {
   // expected-error at below {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
-  vector.maskedstore %base[%index], %mask, %value { alignment = -1 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  vector.maskedstore %base[%index], %mask, %value { alignment = -1 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
   return
 }
 
 // -----
 
-func.func @maskedstore_nonpower2_alignment(%base: memref<?xf32>, %mask: vector<16xi1>, %value: vector<16xf32>, %index: index) {
+func.func @maskedstore_negative_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %value: vector<1xi32>, %index: index) {
   // expected-error at below {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
-  vector.maskedstore %base[%index], %mask, %value { alignment = 3 } : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
+  vector.maskedstore %base[%index], %mask, %value { alignment = 3 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
   return
 }
 

>From 8c65e3ddbe3bc96d4a021fc15743c756f3d98ffb Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 10:40:55 -0400
Subject: [PATCH 10/21] Add AlignmentBytes class

---
 mlir/include/mlir/Dialect/Vector/IR/VectorOps.h  | 8 ++++++++
 mlir/include/mlir/Dialect/Vector/IR/VectorOps.td | 6 +++---
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
index 364c1728715e8..048fd1f937c9e 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
@@ -77,6 +77,14 @@ struct VectorDim {
   int64_t dim;
   bool isScalable;
 };
+
+struct AlignmentBytes {
+  uint64_t alignment = 0;
+  AlignmentBytes() = default;
+  explicit AlignmentBytes(uint64_t alignment_) : alignment(alignment_){};
+  operator bool() const { return 0 != alignment; }
+};
+
 BroadcastableToResult
 isBroadcastableTo(Type srcType, VectorType dstVectorType,
                   std::pair<VectorDim, VectorDim> *mismatchingDims = nullptr);
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 923214e949f03..97c7a4607df88 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1729,7 +1729,7 @@ def Vector_LoadOp : Vector_Op<"load", [
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"AlignmentBytes", "AlignmentBytes()">: $alignment), [{
       return build($_builder, $_state, resultType, base, indices, nontemporal,
                    alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
                                     nullptr);
@@ -1738,7 +1738,7 @@ def Vector_LoadOp : Vector_Op<"load", [
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"AlignmentBytes", "AlignmentBytes()">: $alignment), [{
       return build($_builder, $_state, resultTypes, base, indices, nontemporal,
                    alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
                                     nullptr);
@@ -2037,7 +2037,7 @@ def Vector_MaskedStoreOp :
                    "ValueRange":$indices,
                    "Value":$mask,
                    "Value":$valueToStore,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"AlignmentBytes", "AlignmentBytes()">: $alignment), [{
       return build($_builder, $_state, base, indices, mask, valueToStore,
                    alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
                                     nullptr);

>From 3a7eeae29c36af4b97dfaf37dc835a24eb79ced8 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 11:57:24 -0400
Subject: [PATCH 11/21] Outline getting alignment from load or store op

---
 .../VectorToLLVM/ConvertVectorToLLVM.cpp      | 24 +++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 54bf31c379aa0..ce3fee9f02b75 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -121,6 +121,25 @@ LogicalResult getVectorToLLVMAlignment(const LLVMTypeConverter &typeConverter,
   return success();
 }
 
+// Helper to resolve the alignment for vector load/store, gather and scatter
+// ops. First, this method will try to obtain the preferred alignment from the
+// load or store operation itself. If the store or load operation does not
+// contain any preferred alignment, then it will get alignment attribute through
+// the type or the backend.
+template <class LoadOrStoreOp>
+LogicalResult getVectorToLLVMAlignment(LoadOrStoreOp loadOrStoreOp,
+                                       const LLVMTypeConverter &typeConverter,
+                                       VectorType vectorType,
+                                       MemRefType memrefType, unsigned &align,
+                                       bool useVectorAlignment) {
+  if (auto alignment = loadOrStoreOp.getAlignment()) {
+    align = alignment.value_or(0);
+    return success();
+  }
+  return getVectorToLLVMAlignment(typeConverter, vectorType, memrefType,
+                                    align, useVectorAlignment);
+}
+
 // Check if the last stride is non-unit and has a valid memory space.
 static LogicalResult isMemRefTypeSupported(MemRefType memRefType,
                                            const LLVMTypeConverter &converter) {
@@ -247,8 +266,9 @@ class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
     MemRefType memRefTy = loadOrStoreOp.getMemRefType();
 
     // Resolve alignment.
-    unsigned align = loadOrStoreOp.getAlignment().value_or(0);
-    if (!align && failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vectorTy,
+    unsigned align;
+    if (failed(getVectorToLLVMAlignment(loadOrStoreOp,
+                                        *this->getTypeConverter(), vectorTy,
                                         memRefTy, align, useVectorAlignment)))
       return rewriter.notifyMatchFailure(loadOrStoreOp,
                                          "could not resolve alignment");

>From 85376c3fea08d04125be0958477906ec46bc1231 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Tue, 5 Aug 2025 12:13:36 -0400
Subject: [PATCH 12/21] Use value instead of value_or

---
 mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index ce3fee9f02b75..9e065948baf68 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -133,7 +133,7 @@ LogicalResult getVectorToLLVMAlignment(LoadOrStoreOp loadOrStoreOp,
                                        MemRefType memrefType, unsigned &align,
                                        bool useVectorAlignment) {
   if (auto alignment = loadOrStoreOp.getAlignment()) {
-    align = alignment.value_or(0);
+    align = alignment.value();
     return success();
   }
   return getVectorToLLVMAlignment(typeConverter, vectorType, memrefType,

>From 125a84edff9e184672fd0028415a8ec87b9a0f30 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Wed, 6 Aug 2025 18:46:33 -0700
Subject: [PATCH 13/21] Revert "Use value instead of value_or"

This reverts commit 85376c3fea08d04125be0958477906ec46bc1231.
---
 mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 9e065948baf68..ce3fee9f02b75 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -133,7 +133,7 @@ LogicalResult getVectorToLLVMAlignment(LoadOrStoreOp loadOrStoreOp,
                                        MemRefType memrefType, unsigned &align,
                                        bool useVectorAlignment) {
   if (auto alignment = loadOrStoreOp.getAlignment()) {
-    align = alignment.value();
+    align = alignment.value_or(0);
     return success();
   }
   return getVectorToLLVMAlignment(typeConverter, vectorType, memrefType,

>From 2c8181ed8e1e30b4a479b4774c718188d8e8eaa6 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Wed, 6 Aug 2025 18:46:36 -0700
Subject: [PATCH 14/21] Revert "Outline getting alignment from load or store
 op"

This reverts commit 3a7eeae29c36af4b97dfaf37dc835a24eb79ced8.
---
 .../VectorToLLVM/ConvertVectorToLLVM.cpp      | 24 ++-----------------
 1 file changed, 2 insertions(+), 22 deletions(-)

diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index ce3fee9f02b75..54bf31c379aa0 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -121,25 +121,6 @@ LogicalResult getVectorToLLVMAlignment(const LLVMTypeConverter &typeConverter,
   return success();
 }
 
-// Helper to resolve the alignment for vector load/store, gather and scatter
-// ops. First, this method will try to obtain the preferred alignment from the
-// load or store operation itself. If the store or load operation does not
-// contain any preferred alignment, then it will get alignment attribute through
-// the type or the backend.
-template <class LoadOrStoreOp>
-LogicalResult getVectorToLLVMAlignment(LoadOrStoreOp loadOrStoreOp,
-                                       const LLVMTypeConverter &typeConverter,
-                                       VectorType vectorType,
-                                       MemRefType memrefType, unsigned &align,
-                                       bool useVectorAlignment) {
-  if (auto alignment = loadOrStoreOp.getAlignment()) {
-    align = alignment.value_or(0);
-    return success();
-  }
-  return getVectorToLLVMAlignment(typeConverter, vectorType, memrefType,
-                                    align, useVectorAlignment);
-}
-
 // Check if the last stride is non-unit and has a valid memory space.
 static LogicalResult isMemRefTypeSupported(MemRefType memRefType,
                                            const LLVMTypeConverter &converter) {
@@ -266,9 +247,8 @@ class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
     MemRefType memRefTy = loadOrStoreOp.getMemRefType();
 
     // Resolve alignment.
-    unsigned align;
-    if (failed(getVectorToLLVMAlignment(loadOrStoreOp,
-                                        *this->getTypeConverter(), vectorTy,
+    unsigned align = loadOrStoreOp.getAlignment().value_or(0);
+    if (!align && failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vectorTy,
                                         memRefTy, align, useVectorAlignment)))
       return rewriter.notifyMatchFailure(loadOrStoreOp,
                                          "could not resolve alignment");

>From 6bcf13dbb8527f1fc03439bc93e0243026cebac7 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Wed, 6 Aug 2025 18:46:38 -0700
Subject: [PATCH 15/21] Revert "Add AlignmentBytes class"

This reverts commit 8c65e3ddbe3bc96d4a021fc15743c756f3d98ffb.
---
 mlir/include/mlir/Dialect/Vector/IR/VectorOps.h  | 8 --------
 mlir/include/mlir/Dialect/Vector/IR/VectorOps.td | 6 +++---
 2 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
index 048fd1f937c9e..364c1728715e8 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
@@ -77,14 +77,6 @@ struct VectorDim {
   int64_t dim;
   bool isScalable;
 };
-
-struct AlignmentBytes {
-  uint64_t alignment = 0;
-  AlignmentBytes() = default;
-  explicit AlignmentBytes(uint64_t alignment_) : alignment(alignment_){};
-  operator bool() const { return 0 != alignment; }
-};
-
 BroadcastableToResult
 isBroadcastableTo(Type srcType, VectorType dstVectorType,
                   std::pair<VectorDim, VectorDim> *mismatchingDims = nullptr);
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 97c7a4607df88..923214e949f03 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1729,7 +1729,7 @@ def Vector_LoadOp : Vector_Op<"load", [
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"AlignmentBytes", "AlignmentBytes()">: $alignment), [{
+                   CArg<"uint64_t", "0">:$alignment), [{
       return build($_builder, $_state, resultType, base, indices, nontemporal,
                    alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
                                     nullptr);
@@ -1738,7 +1738,7 @@ def Vector_LoadOp : Vector_Op<"load", [
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"AlignmentBytes", "AlignmentBytes()">: $alignment), [{
+                   CArg<"uint64_t", "0">:$alignment), [{
       return build($_builder, $_state, resultTypes, base, indices, nontemporal,
                    alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
                                     nullptr);
@@ -2037,7 +2037,7 @@ def Vector_MaskedStoreOp :
                    "ValueRange":$indices,
                    "Value":$mask,
                    "Value":$valueToStore,
-                   CArg<"AlignmentBytes", "AlignmentBytes()">: $alignment), [{
+                   CArg<"uint64_t", "0">:$alignment), [{
       return build($_builder, $_state, base, indices, mask, valueToStore,
                    alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
                                     nullptr);

>From 577e6125e49e3dfb2aa7a9b269cf4778d87b3fc3 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Wed, 6 Aug 2025 18:48:12 -0700
Subject: [PATCH 16/21] Change name of tests

---
 mlir/test/Dialect/Vector/invalid.mlir | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
index 213bc342f2d7e..a8af38bf7359b 100644
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1317,7 +1317,7 @@ func.func @maskedload_negative_alignment(%base: memref<4xi32>, %mask: vector<32x
 
 // -----
 
-func.func @maskedload_negative_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %pass: vector<1xi32>, %index: index) {
+func.func @maskedload_nonpoweroftwo_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %pass: vector<1xi32>, %index: index) {
   // expected-error at below {{'vector.maskedload' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
   %val = vector.maskedload %base[%index], %mask, %pass { alignment = 3 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
   return
@@ -1368,7 +1368,7 @@ func.func @maskedstore_negative_alignment(%base: memref<4xi32>, %mask: vector<32
 
 // -----
 
-func.func @maskedstore_negative_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %value: vector<1xi32>, %index: index) {
+func.func @maskedstore_nonpoweroftwo_alignment(%base: memref<4xi32>, %mask: vector<32xi1>, %value: vector<1xi32>, %index: index) {
   // expected-error at below {{'vector.maskedstore' op attribute 'alignment' failed to satisfy constraint: 64-bit signless integer attribute whose value is positive and whose value is a power of two > 0}}
   vector.maskedstore %base[%index], %mask, %value { alignment = 3 } : memref<4xi32>, vector<32xi1>, vector<1xi32> into vector<1xi32>
   return

>From c66101982447b1083541c15b5d25326facec5209 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Wed, 6 Aug 2025 16:34:02 -0700
Subject: [PATCH 17/21] Use llvm::Align

---
 mlir/include/mlir/Dialect/Vector/IR/VectorOps.h  |  1 +
 mlir/include/mlir/Dialect/Vector/IR/VectorOps.td | 12 ++++++------
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
index 364c1728715e8..63410b8bea747 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.h
@@ -32,6 +32,7 @@
 #include "mlir/Interfaces/ViewLikeInterface.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Alignment.h"
 
 // Pull in all enum type definitions and utility function declarations.
 #include "mlir/Dialect/Vector/IR/VectorEnums.h.inc"
diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 923214e949f03..15e066b8262ac 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1729,18 +1729,18 @@ def Vector_LoadOp : Vector_Op<"load", [
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
       return build($_builder, $_state, resultType, base, indices, nontemporal,
-                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
                                     nullptr);
     }]>,
     OpBuilder<(ins "TypeRange":$resultTypes,
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
       return build($_builder, $_state, resultTypes, base, indices, nontemporal,
-                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
                                     nullptr);
     }]>
   ];
@@ -1847,9 +1847,9 @@ def Vector_StoreOp : Vector_Op<"store", [
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
       return build($_builder, $_state, valueToStore, base, indices, nontemporal,
-                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
                                     nullptr);
     }]>
   ];

>From 638df9aca04f271f6b4994590e2e0b3b64a94534 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Thu, 7 Aug 2025 16:25:36 -0700
Subject: [PATCH 18/21] style

---
 mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 54bf31c379aa0..f9e2a01dbf969 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -248,7 +248,8 @@ class VectorLoadStoreConversion : public ConvertOpToLLVMPattern<LoadOrStoreOp> {
 
     // Resolve alignment.
     unsigned align = loadOrStoreOp.getAlignment().value_or(0);
-    if (!align && failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vectorTy,
+    if (!align &&
+        failed(getVectorToLLVMAlignment(*this->getTypeConverter(), vectorTy,
                                         memRefTy, align, useVectorAlignment)))
       return rewriter.notifyMatchFailure(loadOrStoreOp,
                                          "could not resolve alignment");

>From d5f19c3baceeb41804c2fe7b4206737d81ab271d Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Thu, 7 Aug 2025 16:28:05 -0700
Subject: [PATCH 19/21] Use llvm::Align with masked operations

---
 mlir/include/mlir/Dialect/Vector/IR/VectorOps.td | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 15e066b8262ac..8b6625799d915 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1947,9 +1947,9 @@ def Vector_MaskedLoadOp :
                    "ValueRange":$indices,
                    "Value":$mask,
                    "Value":$passthrough,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
       return build($_builder, $_state, resultType, base, indices, mask, passthrough,
-                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
                                     nullptr);
     }]>,
     OpBuilder<(ins "TypeRange":$resultTypes,
@@ -1957,9 +1957,9 @@ def Vector_MaskedLoadOp :
                    "ValueRange":$indices,
                    "Value":$mask,
                    "Value":$passthrough,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
       return build($_builder, $_state, resultTypes, base, indices, mask, passthrough,
-                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
                                     nullptr);
     }]>
   ];
@@ -2037,9 +2037,9 @@ def Vector_MaskedStoreOp :
                    "ValueRange":$indices,
                    "Value":$mask,
                    "Value":$valueToStore,
-                   CArg<"uint64_t", "0">:$alignment), [{
+                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
       return build($_builder, $_state, base, indices, mask, valueToStore,
-                   alignment != 0 ? $_builder.getI64IntegerAttr(alignment) :
+                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
                                     nullptr);
     }]>
   ];

>From 403a114a478e8b6453772ac43f154489cba0fff2 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Thu, 7 Aug 2025 19:24:29 -0700
Subject: [PATCH 20/21] Use llvm::MaybeAlign instead of llvm::Align

---
 .../mlir/Dialect/Vector/IR/VectorOps.td       | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 6a98024e7f226..35c9c1df44466 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1729,18 +1729,18 @@ def Vector_LoadOp : Vector_Op<"load", [
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
+                   CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, resultType, base, indices, nontemporal,
-                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
+                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
                                     nullptr);
     }]>,
     OpBuilder<(ins "TypeRange":$resultTypes,
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
+                   CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, resultTypes, base, indices, nontemporal,
-                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
+                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
                                     nullptr);
     }]>
   ];
@@ -1847,9 +1847,9 @@ def Vector_StoreOp : Vector_Op<"store", [
                    "Value":$base,
                    "ValueRange":$indices,
                    CArg<"bool", "false">:$nontemporal,
-                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
+                   CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, valueToStore, base, indices, nontemporal,
-                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
+                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
                                     nullptr);
     }]>
   ];
@@ -1947,9 +1947,9 @@ def Vector_MaskedLoadOp :
                    "ValueRange":$indices,
                    "Value":$mask,
                    "Value":$passthrough,
-                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
+                   CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, resultType, base, indices, mask, passthrough,
-                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
+                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
                                     nullptr);
     }]>,
     OpBuilder<(ins "TypeRange":$resultTypes,
@@ -1957,9 +1957,9 @@ def Vector_MaskedLoadOp :
                    "ValueRange":$indices,
                    "Value":$mask,
                    "Value":$passthrough,
-                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
+                   CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, resultTypes, base, indices, mask, passthrough,
-                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
+                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
                                     nullptr);
     }]>
   ];
@@ -2037,9 +2037,9 @@ def Vector_MaskedStoreOp :
                    "ValueRange":$indices,
                    "Value":$mask,
                    "Value":$valueToStore,
-                   CArg<"llvm::Align", "llvm::Align()">:$alignment), [{
+                   CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, base, indices, mask, valueToStore,
-                   alignment != llvm::Align() ? $_builder.getI64IntegerAttr(alignment.value()) :
+                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
                                     nullptr);
     }]>
   ];

>From 8b3a0fde6339bdf588cf797c6519051accb68dd4 Mon Sep 17 00:00:00 2001
From: Erick Ochoa <erick.ochoalopez at amd.com>
Date: Thu, 7 Aug 2025 19:51:26 -0700
Subject: [PATCH 21/21] Apply suggestions from review

---
 mlir/include/mlir/Dialect/Vector/IR/VectorOps.td | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
index 35c9c1df44466..02febb03f562c 100644
--- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
+++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1731,7 +1731,7 @@ def Vector_LoadOp : Vector_Op<"load", [
                    CArg<"bool", "false">:$nontemporal,
                    CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, resultType, base, indices, nontemporal,
-                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
+                   alignment.has_value() ? $_builder.getI64IntegerAttr((*alignment).value()) :
                                     nullptr);
     }]>,
     OpBuilder<(ins "TypeRange":$resultTypes,
@@ -1740,7 +1740,7 @@ def Vector_LoadOp : Vector_Op<"load", [
                    CArg<"bool", "false">:$nontemporal,
                    CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, resultTypes, base, indices, nontemporal,
-                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
+                   alignment.has_value() ? $_builder.getI64IntegerAttr((*alignment).value()) :
                                     nullptr);
     }]>
   ];
@@ -1849,7 +1849,7 @@ def Vector_StoreOp : Vector_Op<"store", [
                    CArg<"bool", "false">:$nontemporal,
                    CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, valueToStore, base, indices, nontemporal,
-                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
+                   alignment.has_value() ? $_builder.getI64IntegerAttr((*alignment).value()) :
                                     nullptr);
     }]>
   ];
@@ -1949,7 +1949,7 @@ def Vector_MaskedLoadOp :
                    "Value":$passthrough,
                    CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, resultType, base, indices, mask, passthrough,
-                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
+                   alignment.has_value() ? $_builder.getI64IntegerAttr((*alignment).value()) :
                                     nullptr);
     }]>,
     OpBuilder<(ins "TypeRange":$resultTypes,
@@ -1959,7 +1959,7 @@ def Vector_MaskedLoadOp :
                    "Value":$passthrough,
                    CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, resultTypes, base, indices, mask, passthrough,
-                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
+                   alignment.has_value() ? $_builder.getI64IntegerAttr((*alignment).value()) :
                                     nullptr);
     }]>
   ];
@@ -2039,7 +2039,7 @@ def Vector_MaskedStoreOp :
                    "Value":$valueToStore,
                    CArg<"llvm::MaybeAlign", "llvm::MaybeAlign()">:$alignment), [{
       return build($_builder, $_state, base, indices, mask, valueToStore,
-                   alignment != llvm::MaybeAlign() ? $_builder.getI64IntegerAttr(alignment.valueOrOne().value()) :
+                   alignment.has_value() ? $_builder.getI64IntegerAttr((*alignment).value()) :
                                     nullptr);
     }]>
   ];



More information about the Mlir-commits mailing list