[llvm] [AMDGPU] Fix edge case of buffer OOB handling (PR #115479)

Piotr Sobczak via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 24 05:26:55 PST 2025


https://github.com/piotrAMD updated https://github.com/llvm/llvm-project/pull/115479

>From 9099a051d9b3ce7d9eb36ed3def65d3b1a4dd338 Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Fri, 8 Nov 2024 12:21:43 +0100
Subject: [PATCH 1/9] [AMDGPU] Add target feature
 require-naturally-aligned-buffer-access

Add a new target feature, require-naturally-aligned-buffer-access, to guarantee
robust out-of-bounds behavior. When set, it disallows buffer accesses whose
alignment is lower than their natural alignment.

This specifically addresses the edge case where an access starts out-of-bounds
and then enters in-bounds, since the hardware would treat the entire access as
out-of-bounds. Most users do not need this (hence the target feature), but at
least one Vulkan extension (VK_EXT_robustness2) has very strict requirements:
in-bounds accesses must return the correct value, and out-of-bounds accesses
must return zero.

The direct result of the patch is that, when the new target feature is set, a
buffer access at a negative offset will not be merged with one at a
non-negative offset.
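
To make the edge case concrete, here is a minimal IR sketch of the kind of
merge the feature prevents (the pointer and value names are illustrative, not
taken from the patch):

  ; Two adjacent, individually 4-byte-aligned i32 loads from a buffer fat pointer.
  %gep_m4 = getelementptr i8, ptr addrspace(7) %p, i32 -4
  %lo = load i32, ptr addrspace(7) %gep_m4, align 4   ; may start out-of-bounds
  %hi = load i32, ptr addrspace(7) %p, align 4        ; in-bounds

  ; Without the feature, the load-store vectorizer may merge these into one
  ; 8-byte access that straddles the buffer base but is only 4-byte aligned:
  %v = load <2 x i32>, ptr addrspace(7) %gep_m4, align 4

With the feature enabled, allowsMisalignedMemoryAccessesImpl rejects the merged
8-byte access at align 4 (its natural alignment is 8), so the two loads stay
separate and only the first one is treated as out-of-bounds.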
---
 llvm/lib/Target/AMDGPU/AMDGPU.td              |  6 ++
 llvm/lib/Target/AMDGPU/GCNSubtarget.h         |  5 ++
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 14 ++++
 .../AMDGPU/unaligned-buffer.ll                | 80 +++++++++++++++++++
 4 files changed, 105 insertions(+)
 create mode 100644 llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index bde61a1f7e58d..8a184a92f016e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -119,6 +119,12 @@ def FeatureUnalignedDSAccess : SubtargetFeature<"unaligned-ds-access",
   "Hardware supports unaligned local and region loads and stores"
 >;
 
+def FeatureRequireNaturallyAlignedBufferAccess : SubtargetFeature<"require-naturally-aligned-buffer-access",
+  "RequireNaturallyAlignedBufferAccess",
+  "true",
+  "Requires natural alignment of buffer accesses"
+>;
+
 def FeatureApertureRegs : SubtargetFeature<"aperture-regs",
   "HasApertureRegs",
   "true",
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 6ff964077d8fd..541e3c0f399e3 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -76,6 +76,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
   bool BackOffBarrier = false;
   bool UnalignedScratchAccess = false;
   bool UnalignedAccessMode = false;
+  bool RequireNaturallyAlignedBufferAccess = false;
   bool HasApertureRegs = false;
   bool SupportsXNACK = false;
   bool KernargPreload = false;
@@ -600,6 +601,10 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
     return UnalignedAccessMode;
   }
 
+  bool requiresNaturallyAlignedBufferAccess() const {
+    return RequireNaturallyAlignedBufferAccess;
+  }
+
   bool hasApertureRegs() const {
     return HasApertureRegs;
   }
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 419414e5bd993..d4321eb682dd9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1840,6 +1840,20 @@ bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
            Subtarget->hasUnalignedBufferAccessEnabled();
   }
 
+  // Check natural alignment of buffer if the target requires it. This is needed
+  // only if robust out-of-bounds guarantees are needed. Normally hardware will
+  // ensure proper out-of-bounds behavior, but in the edge case where an access
+  // starts out-of-bounds and then enters in-bounds, the entire access would be
+  // treated as out-of-bounds. Requiring the natural alignment avoids the
+  // problem.
+  if (AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER ||
+      AddrSpace == AMDGPUAS::BUFFER_RESOURCE ||
+      AddrSpace == AMDGPUAS::BUFFER_STRIDED_POINTER) {
+    if (Subtarget->requiresNaturallyAlignedBufferAccess() &&
+        Alignment < Align(PowerOf2Ceil(divideCeil(Size, 8))))
+      return false;
+  }
+
   // Smaller than dword value must be aligned.
   if (Size < 32)
     return false;
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
new file mode 100644
index 0000000000000..0d8a98feecb82
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -mattr=+require-naturally-aligned-buffer-access -S -o - %s | FileCheck --check-prefix=ALIGNED %s
+; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -S -o - %s | FileCheck --check-prefixes=UNALIGNED %s
+
+; The test checks that require-naturally-aligned-buffer-access target feature prevents merging loads if the target load would not be naturally aligned.
+
+define amdgpu_kernel void @merge_align_4(ptr addrspace(7) nocapture %p, ptr addrspace(7) nocapture %p2) #0 {
+;
+; ALIGNED-LABEL: define amdgpu_kernel void @merge_align_4(
+; ALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]], ptr addrspace(7) nocapture [[P2:%.*]]) #[[ATTR0:[0-9]+]] {
+; ALIGNED-NEXT:  [[ENTRY:.*:]]
+; ALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
+; ALIGNED-NEXT:    [[LD_M8:%.*]] = load i32, ptr addrspace(7) [[GEP_M8]], align 4
+; ALIGNED-NEXT:    [[GEP_M4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -4
+; ALIGNED-NEXT:    [[LD_M4:%.*]] = load i32, ptr addrspace(7) [[GEP_M4]], align 4
+; ALIGNED-NEXT:    [[GEP_0:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 0
+; ALIGNED-NEXT:    [[LD_0:%.*]] = load i32, ptr addrspace(7) [[GEP_0]], align 4
+; ALIGNED-NEXT:    [[GEP_4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i64 4
+; ALIGNED-NEXT:    [[LD_4:%.*]] = load i32, ptr addrspace(7) [[GEP_4]], align 4
+; ALIGNED-NEXT:    ret void
+;
+; UNALIGNED-LABEL: define amdgpu_kernel void @merge_align_4(
+; UNALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]], ptr addrspace(7) nocapture [[P2:%.*]]) {
+; UNALIGNED-NEXT:  [[ENTRY:.*:]]
+; UNALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
+; UNALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 4
+; UNALIGNED-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
+; UNALIGNED-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
+; UNALIGNED-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
+; UNALIGNED-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
+; UNALIGNED-NEXT:    ret void
+;
+entry:
+  %gep_m8 = getelementptr i8, ptr addrspace(7) %p, i32 -8
+  %ld_m8 = load i32, ptr addrspace(7) %gep_m8, align 4
+  %gep_m4 = getelementptr i8, ptr addrspace(7) %p, i32 -4
+  %ld_m4 = load i32, ptr addrspace(7) %gep_m4, align 4
+  %gep_0 = getelementptr i8, ptr addrspace(7) %p, i32 0
+  %ld_0 = load i32, ptr addrspace(7) %gep_0, align 4
+  %gep_4 = getelementptr i8, ptr addrspace(7) %p, i64 4
+  %ld_4 = load i32, ptr addrspace(7) %gep_4, align 4
+  ret void
+}
+
+; The test checks that require-naturally-aligned-buffer-access target feature does not prevent merging loads if the target load would be naturally aligned.
+
+define amdgpu_kernel void @merge_align_16(ptr addrspace(7) nocapture %p, ptr addrspace(7) nocapture %p2) #0 {
+; ALIGNED-LABEL: define amdgpu_kernel void @merge_align_16(
+; ALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]], ptr addrspace(7) nocapture [[P2:%.*]]) #[[ATTR0]] {
+; ALIGNED-NEXT:  [[ENTRY:.*:]]
+; ALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
+; ALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
+; ALIGNED-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
+; ALIGNED-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
+; ALIGNED-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
+; ALIGNED-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
+; ALIGNED-NEXT:    ret void
+;
+; UNALIGNED-LABEL: define amdgpu_kernel void @merge_align_16(
+; UNALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]], ptr addrspace(7) nocapture [[P2:%.*]]) {
+; UNALIGNED-NEXT:  [[ENTRY:.*:]]
+; UNALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
+; UNALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
+; UNALIGNED-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
+; UNALIGNED-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
+; UNALIGNED-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
+; UNALIGNED-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
+; UNALIGNED-NEXT:    ret void
+;
+entry:
+  %gep_m8 = getelementptr i8, ptr addrspace(7) %p, i32 -8
+  %ld_m8 = load i32, ptr addrspace(7) %gep_m8, align 16
+  %gep_m4 = getelementptr i8, ptr addrspace(7) %p, i32 -4
+  %ld_m4 = load i32, ptr addrspace(7) %gep_m4, align 4
+  %gep_0 = getelementptr i8, ptr addrspace(7) %p, i32 0
+  %ld_0 = load i32, ptr addrspace(7) %gep_0, align 8
+  %gep_4 = getelementptr i8, ptr addrspace(7) %p, i64 4
+  %ld_4 = load i32, ptr addrspace(7) %gep_4, align 4
+  ret void
+}

>From b78e15ba5b6d969a3b5ccd7e8c653ec944ff26bf Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Fri, 8 Nov 2024 17:49:28 +0100
Subject: [PATCH 2/9] Rename function.

---
 llvm/lib/Target/AMDGPU/GCNSubtarget.h     | 2 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 541e3c0f399e3..56a027febcac0 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -601,7 +601,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
     return UnalignedAccessMode;
   }
 
-  bool requiresNaturallyAlignedBufferAccess() const {
+  bool hasRequireNaturallyAlignedBufferAccess() const {
     return RequireNaturallyAlignedBufferAccess;
   }
 
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d4321eb682dd9..adc9c9224000d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1849,7 +1849,7 @@ bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
   if (AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER ||
       AddrSpace == AMDGPUAS::BUFFER_RESOURCE ||
       AddrSpace == AMDGPUAS::BUFFER_STRIDED_POINTER) {
-    if (Subtarget->requiresNaturallyAlignedBufferAccess() &&
+    if (Subtarget->hasRequireNaturallyAlignedBufferAccess() &&
         Alignment < Align(PowerOf2Ceil(divideCeil(Size, 8))))
       return false;
   }

>From adbd8b3c09046607a983c56ff7b2dac788d2ae73 Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Thu, 21 Nov 2024 15:51:25 +0100
Subject: [PATCH 3/9] Simplify test

---
 .../LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll   | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
index 0d8a98feecb82..980fc739780d1 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
@@ -4,10 +4,10 @@
 
 ; The test checks that require-naturally-aligned-buffer-access target feature prevents merging loads if the target load would not be naturally aligned.
 
-define amdgpu_kernel void @merge_align_4(ptr addrspace(7) nocapture %p, ptr addrspace(7) nocapture %p2) #0 {
+define amdgpu_kernel void @merge_align_4(ptr addrspace(7) nocapture %p) #0 {
 ;
 ; ALIGNED-LABEL: define amdgpu_kernel void @merge_align_4(
-; ALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]], ptr addrspace(7) nocapture [[P2:%.*]]) #[[ATTR0:[0-9]+]] {
+; ALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]]) #[[ATTR0:[0-9]+]] {
 ; ALIGNED-NEXT:  [[ENTRY:.*:]]
 ; ALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
 ; ALIGNED-NEXT:    [[LD_M8:%.*]] = load i32, ptr addrspace(7) [[GEP_M8]], align 4
@@ -20,7 +20,7 @@ define amdgpu_kernel void @merge_align_4(ptr addrspace(7) nocapture %p, ptr addr
 ; ALIGNED-NEXT:    ret void
 ;
 ; UNALIGNED-LABEL: define amdgpu_kernel void @merge_align_4(
-; UNALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]], ptr addrspace(7) nocapture [[P2:%.*]]) {
+; UNALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]]) {
 ; UNALIGNED-NEXT:  [[ENTRY:.*:]]
 ; UNALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
 ; UNALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 4
@@ -44,9 +44,9 @@ entry:
 
 ; The test checks that require-naturally-aligned-buffer-access target feature does not prevent merging loads if the target load would be naturally aligned.
 
-define amdgpu_kernel void @merge_align_16(ptr addrspace(7) nocapture %p, ptr addrspace(7) nocapture %p2) #0 {
+define amdgpu_kernel void @merge_align_16(ptr addrspace(7) nocapture %p) #0 {
 ; ALIGNED-LABEL: define amdgpu_kernel void @merge_align_16(
-; ALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]], ptr addrspace(7) nocapture [[P2:%.*]]) #[[ATTR0]] {
+; ALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]]) #[[ATTR0]] {
 ; ALIGNED-NEXT:  [[ENTRY:.*:]]
 ; ALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
 ; ALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
@@ -57,7 +57,7 @@ define amdgpu_kernel void @merge_align_16(ptr addrspace(7) nocapture %p, ptr add
 ; ALIGNED-NEXT:    ret void
 ;
 ; UNALIGNED-LABEL: define amdgpu_kernel void @merge_align_16(
-; UNALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]], ptr addrspace(7) nocapture [[P2:%.*]]) {
+; UNALIGNED-SAME: ptr addrspace(7) nocapture [[P:%.*]]) {
 ; UNALIGNED-NEXT:  [[ENTRY:.*:]]
 ; UNALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
 ; UNALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16

>From 52b3d959cc5b34f25b7a9ff6c1591bc907dae1a4 Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Thu, 21 Nov 2024 16:03:44 +0100
Subject: [PATCH 4/9] Add codegen test for problems with underaligned memory
 accesses

---
 llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll | 51 ++++++++++++++++++++
 1 file changed, 51 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll

diff --git a/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll b/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
new file mode 100644
index 0000000000000..7331cbd25c815
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+require-naturally-aligned-buffer-access  < %s | FileCheck %s
+
+; Check that with require-naturally-aligned-buffer-access attribute, the underaligned loads and stores get split.
+; FIXME: The loads/stores do not get split (extend amdgpu-lower-buffer-fat-pointers?).
+
+define amdgpu_ps void @split_underaligned_load(ptr addrspace(7) inreg %p, ptr addrspace(7) inreg %p2) #0 {
+; CHECK-LABEL: split_underaligned_load:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    v_mov_b32_e32 v0, s4
+; CHECK-NEXT:    v_mov_b32_e32 v2, s9
+; CHECK-NEXT:    s_mov_b32 s15, s8
+; CHECK-NEXT:    s_mov_b32 s14, s7
+; CHECK-NEXT:    s_mov_b32 s13, s6
+; CHECK-NEXT:    buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
+; CHECK-NEXT:    s_mov_b32 s12, s5
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
+; CHECK-NEXT:    s_endpgm
+entry:
+  %gep = getelementptr i8, ptr addrspace(7) %p, i32 0
+  %ld = load i64, ptr addrspace(7) %gep, align 4
+
+  %gep2 = getelementptr i8, ptr addrspace(7) %p2, i32 0
+  store i64 %ld, ptr addrspace(7) %gep2, align 4
+  ret void
+}
+
+; Check that even with require-naturally-aligned-buffer-access attribute, the naturally aligned loads and stores do not get split.
+
+define amdgpu_ps void @do_not_split_aligned_load(ptr addrspace(7) inreg %p, ptr addrspace(7) inreg %p2) #0 {
+; CHECK-LABEL: do_not_split_aligned_load:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    v_mov_b32_e32 v0, s4
+; CHECK-NEXT:    v_mov_b32_e32 v2, s9
+; CHECK-NEXT:    s_mov_b32 s15, s8
+; CHECK-NEXT:    s_mov_b32 s14, s7
+; CHECK-NEXT:    s_mov_b32 s13, s6
+; CHECK-NEXT:    buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
+; CHECK-NEXT:    s_mov_b32 s12, s5
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
+; CHECK-NEXT:    s_endpgm
+entry:
+  %gep = getelementptr i8, ptr addrspace(7) %p, i32 0
+  %ld = load i64, ptr addrspace(7) %gep, align 8
+
+  %gep2 = getelementptr i8, ptr addrspace(7) %p2, i32 0
+  store i64 %ld, ptr addrspace(7) %gep2, align 8
+  ret void
+}

>From 13201b56751bceba920a32f75b2c1c67ccf389ea Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Sun, 24 Nov 2024 09:05:00 +0100
Subject: [PATCH 5/9] Handle globalisel in codegen test

---
 llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll | 53 +++++++++++++++++++-
 1 file changed, 52 insertions(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll b/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
index 7331cbd25c815..17efe2bf6744a 100644
--- a/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+require-naturally-aligned-buffer-access  < %s | FileCheck %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+require-naturally-aligned-buffer-access  < %s | FileCheck -check-prefix=SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+require-naturally-aligned-buffer-access  < %s | FileCheck -check-prefix=GISEL %s
 
 ; Check that with require-naturally-aligned-buffer-access attribute, the underaligned loads and stores get split.
 ; FIXME: The loads/stores do not get split (extend amdgpu-lower-buffer-fat-pointers?).
@@ -17,6 +18,31 @@ define amdgpu_ps void @split_underaligned_load(ptr addrspace(7) inreg %p, ptr ad
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
 ; CHECK-NEXT:    s_endpgm
+; SDAG-LABEL: split_underaligned_load:
+; SDAG:       ; %bb.0: ; %entry
+; SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; SDAG-NEXT:    v_mov_b32_e32 v2, s9
+; SDAG-NEXT:    s_mov_b32 s15, s8
+; SDAG-NEXT:    s_mov_b32 s14, s7
+; SDAG-NEXT:    s_mov_b32 s13, s6
+; SDAG-NEXT:    buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
+; SDAG-NEXT:    s_mov_b32 s12, s5
+; SDAG-NEXT:    s_waitcnt vmcnt(0)
+; SDAG-NEXT:    buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
+; SDAG-NEXT:    s_endpgm
+;
+; GISEL-LABEL: split_underaligned_load:
+; GISEL:       ; %bb.0: ; %entry
+; GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GISEL-NEXT:    v_mov_b32_e32 v2, s9
+; GISEL-NEXT:    s_mov_b32 s12, s5
+; GISEL-NEXT:    s_mov_b32 s13, s6
+; GISEL-NEXT:    s_mov_b32 s14, s7
+; GISEL-NEXT:    buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
+; GISEL-NEXT:    s_mov_b32 s15, s8
+; GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GISEL-NEXT:    buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
+; GISEL-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i8, ptr addrspace(7) %p, i32 0
   %ld = load i64, ptr addrspace(7) %gep, align 4
@@ -41,6 +67,31 @@ define amdgpu_ps void @do_not_split_aligned_load(ptr addrspace(7) inreg %p, ptr
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
 ; CHECK-NEXT:    s_endpgm
+; SDAG-LABEL: do_not_split_aligned_load:
+; SDAG:       ; %bb.0: ; %entry
+; SDAG-NEXT:    v_mov_b32_e32 v0, s4
+; SDAG-NEXT:    v_mov_b32_e32 v2, s9
+; SDAG-NEXT:    s_mov_b32 s15, s8
+; SDAG-NEXT:    s_mov_b32 s14, s7
+; SDAG-NEXT:    s_mov_b32 s13, s6
+; SDAG-NEXT:    buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
+; SDAG-NEXT:    s_mov_b32 s12, s5
+; SDAG-NEXT:    s_waitcnt vmcnt(0)
+; SDAG-NEXT:    buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
+; SDAG-NEXT:    s_endpgm
+;
+; GISEL-LABEL: do_not_split_aligned_load:
+; GISEL:       ; %bb.0: ; %entry
+; GISEL-NEXT:    v_mov_b32_e32 v0, s4
+; GISEL-NEXT:    v_mov_b32_e32 v2, s9
+; GISEL-NEXT:    s_mov_b32 s12, s5
+; GISEL-NEXT:    s_mov_b32 s13, s6
+; GISEL-NEXT:    s_mov_b32 s14, s7
+; GISEL-NEXT:    buffer_load_b64 v[0:1], v0, s[0:3], 0 offen
+; GISEL-NEXT:    s_mov_b32 s15, s8
+; GISEL-NEXT:    s_waitcnt vmcnt(0)
+; GISEL-NEXT:    buffer_store_b64 v[0:1], v2, s[12:15], 0 offen
+; GISEL-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i8, ptr addrspace(7) %p, i32 0
   %ld = load i64, ptr addrspace(7) %gep, align 8

>From f8feeaba7dc991e5565f282f532249898ebd46ee Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Sun, 24 Nov 2024 14:54:32 +0100
Subject: [PATCH 6/9] Add more description

---
 llvm/lib/Target/AMDGPU/AMDGPU.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 8a184a92f016e..aea9601929014 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -122,7 +122,7 @@ def FeatureUnalignedDSAccess : SubtargetFeature<"unaligned-ds-access",
 def FeatureRequireNaturallyAlignedBufferAccess : SubtargetFeature<"require-naturally-aligned-buffer-access",
   "RequireNaturallyAlignedBufferAccess",
   "true",
-  "Requires natural alignment of buffer accesses"
+  "Requires natural alignment of buffer accesses to achieve robust out-of-bounds behavior, ensuring that accesses starting out-of-bounds remain distinct from those starting in-bounds"
 >;
 
 def FeatureApertureRegs : SubtargetFeature<"aperture-regs",

>From 26d046beb4de1158f8e3dc06c247ce1e2d512507 Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Thu, 13 Feb 2025 23:36:06 +0100
Subject: [PATCH 7/9] Invert target feature

---
 llvm/lib/Target/AMDGPU/AMDGPU.td              |  8 +-
 llvm/lib/Target/AMDGPU/GCNSubtarget.h         |  6 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 14 +--
 llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll  |  8 +-
 .../AMDGPU/vectorize-buffer-fat-pointer.ll    | 20 ++--
 .../AMDGPU/merge-vectors.ll                   | 14 ++-
 .../AMDGPU/unaligned-buffer.ll                | 92 +++++++++----------
 7 files changed, 83 insertions(+), 79 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 26d1211dca386..5df880e6dac4d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -119,10 +119,10 @@ def FeatureUnalignedDSAccess : SubtargetFeature<"unaligned-ds-access",
   "Hardware supports unaligned local and region loads and stores"
 >;
 
-def FeatureRequireNaturallyAlignedBufferAccess : SubtargetFeature<"require-naturally-aligned-buffer-access",
-  "RequireNaturallyAlignedBufferAccess",
-  "true",
-  "Requires natural alignment of buffer accesses to achieve robust out-of-bounds behavior, ensuring that accesses starting out-of-bounds remain distinct from those starting in-bounds"
+def FeatureRelaxedBufferOOBMode : SubtargetFeature<"relaxed-buffer-oob-mode",
+  "RelaxedBufferOOBMode",
+   "true",
+  "Enable relaxed out-of-bounds behavior for buffer accesses"
 >;
 
 def FeatureApertureRegs : SubtargetFeature<"aperture-regs",
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 7827dc7555486..d65d8cf7ec9f4 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -78,7 +78,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
   bool BackOffBarrier = false;
   bool UnalignedScratchAccess = false;
   bool UnalignedAccessMode = false;
-  bool RequireNaturallyAlignedBufferAccess = false;
+  bool RelaxedBufferOOBMode = false;
   bool HasApertureRegs = false;
   bool SupportsXNACK = false;
   bool KernargPreload = false;
@@ -609,9 +609,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
     return UnalignedAccessMode;
   }
 
-  bool hasRequireNaturallyAlignedBufferAccess() const {
-    return RequireNaturallyAlignedBufferAccess;
-  }
+  bool hasRelaxedBufferOOBMode() const { return RelaxedBufferOOBMode; }
 
   bool hasApertureRegs() const {
     return HasApertureRegs;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1498f6bd63f25..de60bce15c073 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1877,16 +1877,16 @@ bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
            Subtarget->hasUnalignedBufferAccessEnabled();
   }
 
-  // Check natural alignment of buffer if the target requires it. This is needed
-  // only if robust out-of-bounds guarantees are needed. Normally hardware will
-  // ensure proper out-of-bounds behavior, but in the edge case where an access
-  // starts out-of-bounds and then enters in-bounds, the entire access would be
-  // treated as out-of-bounds. Requiring the natural alignment avoids the
-  // problem.
+  // Ensure robust out-of-bounds guarantees for buffer accesses are met if
+  // RelaxedBufferOOBMode is disabled. Normally hardware will ensure proper
+  // out-of-bounds behavior, but in the edge case where an access starts
+  // out-of-bounds and then enters in-bounds, the entire access would be treated
+  // as out-of-bounds. Prevent misaligned memory accesses by requiring the
+  // natural alignment of buffer accesses.
   if (AddrSpace == AMDGPUAS::BUFFER_FAT_POINTER ||
       AddrSpace == AMDGPUAS::BUFFER_RESOURCE ||
       AddrSpace == AMDGPUAS::BUFFER_STRIDED_POINTER) {
-    if (Subtarget->hasRequireNaturallyAlignedBufferAccess() &&
+    if (!Subtarget->hasRelaxedBufferOOBMode() &&
         Alignment < Align(PowerOf2Ceil(divideCeil(Size, 8))))
       return false;
   }
diff --git a/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll b/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
index 17efe2bf6744a..72c6010a5a80b 100644
--- a/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/unaligned-buffer.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+require-naturally-aligned-buffer-access  < %s | FileCheck -check-prefix=SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+require-naturally-aligned-buffer-access  < %s | FileCheck -check-prefix=GISEL %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefix=SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefix=GISEL %s
 
-; Check that with require-naturally-aligned-buffer-access attribute, the underaligned loads and stores get split.
+; Check that in strict OOB mode for buffers (relaxed-buffer-oob-mode attribute not set) the underaligned loads and stores get split.
 ; FIXME: The loads/stores do not get split (extend amdgpu-lower-buffer-fat-pointers?).
 
 define amdgpu_ps void @split_underaligned_load(ptr addrspace(7) inreg %p, ptr addrspace(7) inreg %p2) #0 {
@@ -52,7 +52,7 @@ entry:
   ret void
 }
 
-; Check that even with require-naturally-aligned-buffer-access attribute, the naturally aligned loads and stores do not get split.
+; Check that in strict OOB mode for buffers (relaxed-buffer-oob-mode attribute not set) the naturally aligned loads and stores do not get split.
 
 define amdgpu_ps void @do_not_split_aligned_load(ptr addrspace(7) inreg %p, ptr addrspace(7) inreg %p2) #0 {
 ; CHECK-LABEL: do_not_split_aligned_load:
diff --git a/llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll b/llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll
index 4aab097229a47..f34617e6efc55 100644
--- a/llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/vectorize-buffer-fat-pointer.ll
@@ -7,11 +7,11 @@ entry:
   %a2 = getelementptr i32, ptr addrspace(7) %out, i32 2
   %a3 = getelementptr i32, ptr addrspace(7) %out, i32 3
 
-; OPT: store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr addrspace(7) %out, align 4
-  store i32 0, ptr addrspace(7) %out
-  store i32 1, ptr addrspace(7) %a1
-  store i32 2, ptr addrspace(7) %a2
-  store i32 3, ptr addrspace(7) %a3
+; OPT: store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr addrspace(7) %out, align 16
+  store i32 0, ptr addrspace(7) %out, align 16
+  store i32 1, ptr addrspace(7) %a1, align 4
+  store i32 2, ptr addrspace(7) %a2, align 8
+  store i32 3, ptr addrspace(7) %a3, align 4
   ret void
 }
 
@@ -22,10 +22,10 @@ entry:
   %a2 = getelementptr i32, ptr addrspace(9) %out, i32 2
   %a3 = getelementptr i32, ptr addrspace(9) %out, i32 3
 
-; OPT: store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr addrspace(9) %out, align 4
-  store i32 0, ptr addrspace(9) %out
-  store i32 1, ptr addrspace(9) %a1
-  store i32 2, ptr addrspace(9) %a2
-  store i32 3, ptr addrspace(9) %a3
+; OPT: store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, ptr addrspace(9) %out, align 16
+  store i32 0, ptr addrspace(9) %out, align 16
+  store i32 1, ptr addrspace(9) %a1, align 4
+  store i32 2, ptr addrspace(9) %a2, align 8
+  store i32 3, ptr addrspace(9) %a3, align 4
   ret void
 }
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
index 07958f1c1a296..ede2e4066c263 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-vectors.ll
@@ -1,4 +1,5 @@
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -mattr=+relaxed-buffer-oob-mode -S -o - %s | FileCheck --check-prefixes=CHECK,CHECK-OOB-RELAXED %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck --check-prefixes=CHECK,CHECK-OOB-STRICT %s
 
 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-ni:7"
 
@@ -72,9 +73,14 @@ entry:
   ret void
 }
 
-; CHECK-LABEL: @merge_fat_ptrs(
-; CHECK: load <4 x i16>
-; CHECK: store <4 x i16> zeroinitializer
+; CHECK-OOB-RELAXED-LABEL: @merge_fat_ptrs(
+; CHECK-OOB-RELAXED: load <4 x i16>
+; CHECK-OOB-RELAXED: store <4 x i16> zeroinitializer
+; CHECK-OOB-STRICT-LABEL: @merge_fat_ptrs(
+; CHECK-OOB-STRICT: load <2 x i16>
+; CHECK-OOB-STRICT: load <2 x i16>
+; CHECK-OOB-STRICT: store <2 x i16> zeroinitializer
+; CHECK-OOB-STRICT: store <2 x i16> zeroinitializer
 define amdgpu_kernel void @merge_fat_ptrs(ptr addrspace(7) nocapture %a, ptr addrspace(7) nocapture readonly %b) #0 {
 entry:
   %a.1 = getelementptr inbounds <2 x i16>, ptr addrspace(7) %a, i32 1
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
index c82d84837ed5d..d590a4a403fb7 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
@@ -1,34 +1,34 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -mattr=+require-naturally-aligned-buffer-access -S -o - %s | FileCheck --check-prefix=ALIGNED %s
-; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -S -o - %s | FileCheck --check-prefixes=UNALIGNED %s
+; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -S -o - %s | FileCheck --check-prefix=OOB-STRICT %s
+; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -mattr=+relaxed-buffer-oob-mode -S -o - %s | FileCheck --check-prefixes=OOB-RELAXED %s
 
-; The test checks that require-naturally-aligned-buffer-access target feature prevents merging loads if the target load would not be naturally aligned.
+; The test checks that relaxed-buffer-oob-mode allows merging loads even if the target load is not naturally aligned.
 
 define amdgpu_kernel void @merge_align_4(ptr addrspace(7) captures(none) %p) #0 {
 ;
-; ALIGNED-LABEL: define amdgpu_kernel void @merge_align_4(
-; ALIGNED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) #[[ATTR0:[0-9]+]] {
-; ALIGNED-NEXT:  [[ENTRY:.*:]]
-; ALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
-; ALIGNED-NEXT:    [[LD_M8:%.*]] = load i32, ptr addrspace(7) [[GEP_M8]], align 4
-; ALIGNED-NEXT:    [[GEP_M4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -4
-; ALIGNED-NEXT:    [[LD_M4:%.*]] = load i32, ptr addrspace(7) [[GEP_M4]], align 4
-; ALIGNED-NEXT:    [[GEP_0:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 0
-; ALIGNED-NEXT:    [[LD_0:%.*]] = load i32, ptr addrspace(7) [[GEP_0]], align 4
-; ALIGNED-NEXT:    [[GEP_4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i64 4
-; ALIGNED-NEXT:    [[LD_4:%.*]] = load i32, ptr addrspace(7) [[GEP_4]], align 4
-; ALIGNED-NEXT:    ret void
+; OOB-STRICT-LABEL: define amdgpu_kernel void @merge_align_4(
+; OOB-STRICT-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) {
+; OOB-STRICT-NEXT:  [[ENTRY:.*:]]
+; OOB-STRICT-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
+; OOB-STRICT-NEXT:    [[LD_M8:%.*]] = load i32, ptr addrspace(7) [[GEP_M8]], align 4
+; OOB-STRICT-NEXT:    [[GEP_M4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -4
+; OOB-STRICT-NEXT:    [[LD_M4:%.*]] = load i32, ptr addrspace(7) [[GEP_M4]], align 4
+; OOB-STRICT-NEXT:    [[GEP_0:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 0
+; OOB-STRICT-NEXT:    [[LD_0:%.*]] = load i32, ptr addrspace(7) [[GEP_0]], align 4
+; OOB-STRICT-NEXT:    [[GEP_4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i64 4
+; OOB-STRICT-NEXT:    [[LD_4:%.*]] = load i32, ptr addrspace(7) [[GEP_4]], align 4
+; OOB-STRICT-NEXT:    ret void
 ;
-; UNALIGNED-LABEL: define amdgpu_kernel void @merge_align_4(
-; UNALIGNED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) {
-; UNALIGNED-NEXT:  [[ENTRY:.*:]]
-; UNALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
-; UNALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 4
-; UNALIGNED-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
-; UNALIGNED-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
-; UNALIGNED-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
-; UNALIGNED-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
-; UNALIGNED-NEXT:    ret void
+; OOB-RELAXED-LABEL: define amdgpu_kernel void @merge_align_4(
+; OOB-RELAXED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) #[[ATTR0:[0-9]+]] {
+; OOB-RELAXED-NEXT:  [[ENTRY:.*:]]
+; OOB-RELAXED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
+; OOB-RELAXED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 4
+; OOB-RELAXED-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
+; OOB-RELAXED-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
+; OOB-RELAXED-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
+; OOB-RELAXED-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
+; OOB-RELAXED-NEXT:    ret void
 ;
 entry:
   %gep_m8 = getelementptr i8, ptr addrspace(7) %p, i32 -8
@@ -42,30 +42,30 @@ entry:
   ret void
 }
 
-; The test checks that require-naturally-aligned-buffer-access target feature does not prevent merging loads if the target load would be naturally aligned.
+; The test checks that strict OOB mode (relaxed-buffer-oob-mode not set) allows merging loads if the target load is naturally aligned.
 
 define amdgpu_kernel void @merge_align_16(ptr addrspace(7) captures(none) %p) #0 {
-; ALIGNED-LABEL: define amdgpu_kernel void @merge_align_16(
-; ALIGNED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) #[[ATTR0]] {
-; ALIGNED-NEXT:  [[ENTRY:.*:]]
-; ALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
-; ALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
-; ALIGNED-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
-; ALIGNED-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
-; ALIGNED-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
-; ALIGNED-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
-; ALIGNED-NEXT:    ret void
+; OOB-STRICT-LABEL: define amdgpu_kernel void @merge_align_16(
+; OOB-STRICT-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) {
+; OOB-STRICT-NEXT:  [[ENTRY:.*:]]
+; OOB-STRICT-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
+; OOB-STRICT-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
+; OOB-STRICT-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
+; OOB-STRICT-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
+; OOB-STRICT-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
+; OOB-STRICT-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
+; OOB-STRICT-NEXT:    ret void
 ;
-; UNALIGNED-LABEL: define amdgpu_kernel void @merge_align_16(
-; UNALIGNED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) {
-; UNALIGNED-NEXT:  [[ENTRY:.*:]]
-; UNALIGNED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
-; UNALIGNED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
-; UNALIGNED-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
-; UNALIGNED-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
-; UNALIGNED-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
-; UNALIGNED-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
-; UNALIGNED-NEXT:    ret void
+; OOB-RELAXED-LABEL: define amdgpu_kernel void @merge_align_16(
+; OOB-RELAXED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) #[[ATTR0]] {
+; OOB-RELAXED-NEXT:  [[ENTRY:.*:]]
+; OOB-RELAXED-NEXT:    [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
+; OOB-RELAXED-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
+; OOB-RELAXED-NEXT:    [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
+; OOB-RELAXED-NEXT:    [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
+; OOB-RELAXED-NEXT:    [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
+; OOB-RELAXED-NEXT:    [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
+; OOB-RELAXED-NEXT:    ret void
 ;
 entry:
   %gep_m8 = getelementptr i8, ptr addrspace(7) %p, i32 -8

>From 88346fb3a319c56a6586953f19e8d9beed2ed16e Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Fri, 14 Feb 2025 07:58:40 +0100
Subject: [PATCH 8/9] Remove stray whitespace

---
 llvm/lib/Target/AMDGPU/AMDGPU.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 5df880e6dac4d..f6fb986355960 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -121,7 +121,7 @@ def FeatureUnalignedDSAccess : SubtargetFeature<"unaligned-ds-access",
 
 def FeatureRelaxedBufferOOBMode : SubtargetFeature<"relaxed-buffer-oob-mode",
   "RelaxedBufferOOBMode",
-   "true",
+  "true",
   "Enable relaxed out-of-bounds behavior for buffer accesses"
 >;
 

>From 166dc3a9d876ce3db0a97344cee518e9ad19ef41 Mon Sep 17 00:00:00 2001
From: Piotr Sobczak <piotr.sobczak at amd.com>
Date: Mon, 24 Feb 2025 14:25:33 +0100
Subject: [PATCH 9/9] Improve comment

---
 llvm/lib/Target/AMDGPU/AMDGPU.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index f6fb986355960..4867e84fb9032 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -122,7 +122,7 @@ def FeatureUnalignedDSAccess : SubtargetFeature<"unaligned-ds-access",
 def FeatureRelaxedBufferOOBMode : SubtargetFeature<"relaxed-buffer-oob-mode",
   "RelaxedBufferOOBMode",
   "true",
-  "Enable relaxed out-of-bounds behavior for buffer accesses"
+  "Disable strict out-of-bounds buffer guarantees. An OOB access may potentially cause an adjacent access to be treated as if it were also OOB"
 >;
 
 def FeatureApertureRegs : SubtargetFeature<"aperture-regs",


