[llvm] [AMDGPU] Add intrinsics for atomic struct buffer loads (PR #100140)

Jessica Del via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 23 08:19:16 PDT 2024


https://github.com/OutOfCache updated https://github.com/llvm/llvm-project/pull/100140

>From ee9859b5b48d5cadc25aa25563ecf03dead5a703 Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Tue, 23 Jul 2024 15:00:03 +0200
Subject: [PATCH 1/2] [AMDGPU] Add intrinsics for atomic struct buffer loads

Similar to https://github.com/llvm/llvm-project/pull/97707,
but for struct buffer loads.

Mark these intrinsics as atomic loads within LLVM so that
they are not hoisted out of loops in cases where the load
would otherwise be considered invariant.
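
A minimal sketch of the hazard (illustrative only, not taken
from this patch): a wait loop that polls a buffer location must
re-read memory on every iteration, so the load has to stay
inside the loop:

define void @wait_for(<4 x i32> %rsrc, i32 %vindex, i32 %expected) {
entry:
  br label %loop
loop:
  ; Atomic form: re-reads memory each iteration; must not be hoisted.
  %v = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %rsrc, i32 %vindex, i32 0, i32 0, i32 1)
  %done = icmp eq i32 %v, %expected
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
declare i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32, i32 immarg)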
---
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  35 ++
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |   2 +
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  |   4 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   8 +-
 .../llvm.amdgcn.struct.atomic.buffer.load.ll  | 364 ++++++++++++++++++
 ...vm.amdgcn.struct.ptr.atomic.buffer.load.ll | 364 ++++++++++++++++++
 6 files changed, 774 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index ab2620fdcf6b3..8c25467cc5e4b 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1200,6 +1200,23 @@ class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntri
 def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
 def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;
 
+class AMDGPUStructAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+  [data_ty],
+  [llvm_v4i32_ty,    // rsrc(SGPR)
+   llvm_i32_ty,      // vindex(VGPR)
+   llvm_i32_ty,      // offset(VGPR/imm, included in bounds checking and swizzling)
+   llvm_i32_ty,      // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+   llvm_i32_ty],     // auxiliary/cachepolicy(imm):
+                     //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
+                     //                bit 3 = swz, bit 4 = scc (gfx90a)
+                     //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
+                     //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
+                     //                bit 6 = swz
+                     //           all: volatile op (bit 31, stripped at lowering)
+  [ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
+  AMDGPURsrcIntrinsic<0>;
+def int_amdgcn_struct_atomic_buffer_load : AMDGPUStructAtomicBufferLoad;
+
 class AMDGPUStructPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [data_ty],
   [AMDGPUBufferRsrcTy,    // rsrc(SGPR)
@@ -1219,6 +1236,24 @@ class AMDGPUStructPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIn
 def int_amdgcn_struct_ptr_buffer_load_format : AMDGPUStructPtrBufferLoad;
 def int_amdgcn_struct_ptr_buffer_load : AMDGPUStructPtrBufferLoad;
 
+class AMDGPUStructPtrAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+  [data_ty],
+  [AMDGPUBufferRsrcTy,    // rsrc(SGPR)
+   llvm_i32_ty,           // vindex(VGPR)
+   llvm_i32_ty,           // offset(VGPR/imm, included in bounds checking and swizzling)
+   llvm_i32_ty,           // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+   llvm_i32_ty],          // auxiliary/cachepolicy(imm):
+                          //                bit 0 = glc, bit 1 = slc, bit 2 = dlc (gfx10/gfx11),
+                          //                bit 3 = swz, bit 4 = scc (gfx90a)
+                          //        gfx940: bit 0 = sc0, bit 1 = nt, bit 3 = swz, bit 4 = sc1
+                          //        gfx12+: bits [0-2] = th, bits [3-4] = scope,
+                          //                bit 6 = swz
+                          //           all: volatile op (bit 31, stripped at lowering)
+  [IntrArgMemOnly, NoCapture<ArgIndex<0>>,
+   ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
+  AMDGPURsrcIntrinsic<0>;
+def int_amdgcn_struct_ptr_atomic_buffer_load : AMDGPUStructPtrAtomicBufferLoad;
+
 class AMDGPURawBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [],
   [data_ty,          // vdata(VGPR)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 74e93b0620d26..90cee9905feba 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -7370,6 +7370,8 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
   case Intrinsic::amdgcn_raw_ptr_atomic_buffer_load:
   case Intrinsic::amdgcn_struct_buffer_load:
   case Intrinsic::amdgcn_struct_ptr_buffer_load:
+  case Intrinsic::amdgcn_struct_atomic_buffer_load:
+  case Intrinsic::amdgcn_struct_ptr_atomic_buffer_load:
     return legalizeBufferLoad(MI, MRI, B, false, false);
   case Intrinsic::amdgcn_raw_buffer_load_format:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index aa329a58547f3..4a3f327e4c591 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -5020,7 +5020,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     case Intrinsic::amdgcn_struct_buffer_load:
     case Intrinsic::amdgcn_struct_ptr_buffer_load:
     case Intrinsic::amdgcn_struct_tbuffer_load:
-    case Intrinsic::amdgcn_struct_ptr_tbuffer_load: {
+    case Intrinsic::amdgcn_struct_ptr_tbuffer_load:
+    case Intrinsic::amdgcn_struct_atomic_buffer_load:
+    case Intrinsic::amdgcn_struct_ptr_atomic_buffer_load: {
       OpdsMapping[0] = getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
       OpdsMapping[2] = getSGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
       OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7f95442401dbc..8a811f7a7c02d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1278,7 +1278,9 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
         return true;
       }
       case Intrinsic::amdgcn_raw_atomic_buffer_load:
-      case Intrinsic::amdgcn_raw_ptr_atomic_buffer_load: {
+      case Intrinsic::amdgcn_raw_ptr_atomic_buffer_load:
+      case Intrinsic::amdgcn_struct_atomic_buffer_load:
+      case Intrinsic::amdgcn_struct_ptr_atomic_buffer_load: {
         Info.memVT =
             memVTFromLoadIntrReturn(*this, MF.getDataLayout(), CI.getType(),
                                     std::numeric_limits<unsigned>::max());
@@ -8925,7 +8927,9 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   case Intrinsic::amdgcn_struct_buffer_load:
   case Intrinsic::amdgcn_struct_ptr_buffer_load:
   case Intrinsic::amdgcn_struct_buffer_load_format:
-  case Intrinsic::amdgcn_struct_ptr_buffer_load_format: {
+  case Intrinsic::amdgcn_struct_ptr_buffer_load_format:
+  case Intrinsic::amdgcn_struct_atomic_buffer_load:
+  case Intrinsic::amdgcn_struct_ptr_atomic_buffer_load: {
     const bool IsFormat =
         IntrID == Intrinsic::amdgcn_struct_buffer_load_format ||
         IntrID == Intrinsic::amdgcn_struct_ptr_buffer_load_format;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
new file mode 100644
index 0000000000000..e300c4d2f5a15
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
@@ -0,0 +1,364 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=0 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=1 | FileCheck %s -check-prefix=CHECK
+
+define amdgpu_kernel void @struct_atomic_buffer_load_i32(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB0_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB0_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %addr, i32 %index, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_atomic_buffer_load_i32_const_idx(<4 x i32> %addr) {
+; CHECK-LABEL: struct_atomic_buffer_load_i32_const_idx:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB1_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB1_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %addr, i32 15, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_atomic_buffer_load_i32_off(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_i32_off:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB2_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB2_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %addr, i32 %index, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_atomic_buffer_load_i32_soff(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_i32_soff:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB3_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB3_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %addr, i32 %index, i32 4, i32 4, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+define amdgpu_kernel void @struct_atomic_buffer_load_i32_dlc(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_i32_dlc:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB4_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB4_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %addr, i32 %index, i32 4, i32 0, i32 4)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_nonatomic_buffer_load_i32(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_nonatomic_buffer_load_i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_mov_b32 s0, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:  .LBB5_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_and_b32 s1, exec_lo, vcc_lo
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; CHECK-NEXT:    s_or_b32 s0, s1, s0
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s0
+; CHECK-NEXT:    s_cbranch_execnz .LBB5_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.buffer.load.i32(<4 x i32> %addr, i32 %index, i32 4, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_atomic_buffer_load_i64(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_i64:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v2, s4
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB6_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB6_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %id.zext = zext i32 %id to i64
+  br label %bb1
+bb1:
+  %load = call i64 @llvm.amdgcn.struct.atomic.buffer.load.i64(<4 x i32> %addr, i32 %index, i32 4, i32 0, i32 1)
+  %cmp = icmp eq i64 %load, %id.zext
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_atomic_buffer_load_v2i16(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_v2i16:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB7_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB7_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <2 x i16> @llvm.amdgcn.struct.atomic.buffer.load.v2i16(<4 x i32> %addr, i32 %index, i32 0, i32 0, i32 1)
+  %bitcast = bitcast <2 x i16> %load to i32
+  %cmp = icmp eq i32 %bitcast, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_v4i16:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB8_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT:    v_lshl_or_b32 v2, v3, 16, v2
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB8_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <4 x i16> @llvm.amdgcn.struct.atomic.buffer.load.v4i16(<4 x i32> %addr, i32 %index, i32 4, i32 0, i32 1)
+  %shortened = shufflevector <4 x i16> %load, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
+  %bitcast = bitcast <2 x i16> %shortened to i32
+  %cmp = icmp eq i32 %bitcast, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_atomic_buffer_load_v4i32(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_v4i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB9_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB9_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <4 x i32> @llvm.amdgcn.struct.atomic.buffer.load.v4i32(<4 x i32> %addr, i32 %index, i32 4, i32 0, i32 1)
+  %extracted = extractelement <4 x i32> %load, i32 3
+  %cmp = icmp eq i32 %extracted, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_atomic_buffer_load_ptr(<4 x i32> %addr, i32 %index) {
+; CHECK-LABEL: struct_atomic_buffer_load_ptr:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB10_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    flat_load_b32 v2, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB10_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call ptr @llvm.amdgcn.struct.atomic.buffer.load.ptr(<4 x i32> %addr, i32 %index, i32 4, i32 0, i32 1)
+  %elem = load i32, ptr %load
+  %cmp = icmp eq i32 %elem, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32, i32 immarg)
+declare i64 @llvm.amdgcn.struct.atomic.buffer.load.i64(<4 x i32>, i32, i32, i32, i32 immarg)
+declare <2 x i16> @llvm.amdgcn.struct.atomic.buffer.load.v2i16(<4 x i32>, i32, i32, i32, i32 immarg)
+declare <4 x i16> @llvm.amdgcn.struct.atomic.buffer.load.v4i16(<4 x i32>, i32, i32, i32, i32 immarg)
+declare <4 x i32> @llvm.amdgcn.struct.atomic.buffer.load.v4i32(<4 x i32>, i32, i32, i32, i32 immarg)
+declare ptr @llvm.amdgcn.struct.atomic.buffer.load.ptr(<4 x i32>, i32, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.struct.buffer.load.i32(<4 x i32>, i32, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
new file mode 100644
index 0000000000000..bc300c9f6c60e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
@@ -0,0 +1,364 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=0 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=1 | FileCheck %s -check-prefix=CHECK
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB0_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB0_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_const_idx(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB1_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB1_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 15, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_off:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB2_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB2_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_soff:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB3_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB3_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 4, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_dlc:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB4_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB4_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 4)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_nonatomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_nonatomic_buffer_load_i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_mov_b32 s0, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:  .LBB5_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_and_b32 s1, exec_lo, vcc_lo
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; CHECK-NEXT:    s_or_b32 s0, s1, s0
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s0
+; CHECK-NEXT:    s_cbranch_execnz .LBB5_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_i64:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v2, s4
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB6_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB6_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %id.zext = zext i32 %id to i64
+  br label %bb1
+bb1:
+  %load = call i64 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i64(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
+  %cmp = icmp eq i64 %load, %id.zext
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_v2i16:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB7_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB7_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <2 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v2i16(ptr addrspace(8) %ptr, i32 %index, i32 0, i32 0, i32 1)
+  %bitcast = bitcast <2 x i16> %load to i32
+  %cmp = icmp eq i32 %bitcast, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB8_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v2, 0xffff, v2
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT:    v_lshl_or_b32 v2, v3, 16, v2
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB8_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <4 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v4i16(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
+  %shortened = shufflevector <4 x i16> %load, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
+  %bitcast = bitcast <2 x i16> %shortened to i32
+  %cmp = icmp eq i32 %bitcast, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_v4i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB9_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB9_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <4 x i32> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v4i32(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
+  %extracted = extractelement <4 x i32> %load, i32 3
+  %cmp = icmp eq i32 %extracted, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @struct_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr, i32 %index) {
+; CHECK-LABEL: struct_ptr_atomic_buffer_load_ptr:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_clause 0x1
+; CHECK-NEXT:    s_load_b32 s4, s[2:3], 0x34
+; CHECK-NEXT:    s_load_b128 s[0:3], s[2:3], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_and_b32 v0, 0x3ff, v0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB10_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    flat_load_b32 v2, v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB10_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call ptr @llvm.amdgcn.struct.ptr.atomic.buffer.load.ptr(ptr addrspace(8) %ptr, i32 %index, i32 4, i32 0, i32 1)
+  %elem = load i32, ptr %load
+  %cmp = icmp eq i32 %elem, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8), i32, i32, i32, i32 immarg)
+declare i64 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i64(ptr addrspace(8), i32, i32, i32, i32 immarg)
+declare <2 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v2i16(ptr addrspace(8), i32, i32, i32, i32 immarg)
+declare <4 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v4i16(ptr addrspace(8), i32, i32, i32, i32 immarg)
+declare <4 x i32> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v4i32(ptr addrspace(8), i32, i32, i32, i32 immarg)
+declare ptr @llvm.amdgcn.struct.ptr.atomic.buffer.load.ptr(ptr addrspace(8), i32, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.struct.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.workitem.id.x()
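
The two test files above exercise the two resource forms of the
new intrinsic: the raw-descriptor form takes the resource as a
<4 x i32>, while the ptr form takes ptr addrspace(8) and carries
IntrArgMemOnly and NoCapture on the resource argument. Both
select the same buffer_load instructions. A minimal sketch of
the two call forms (illustrative, not part of the patch):

define i32 @both_forms(<4 x i32> %rsrc, ptr addrspace(8) %rsrc.p8, i32 %idx) {
  ; Raw-descriptor form: resource passed as a <4 x i32> descriptor.
  %a = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %rsrc, i32 %idx, i32 0, i32 0, i32 1)
  ; Pointer form: resource passed as ptr addrspace(8).
  %b = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %rsrc.p8, i32 %idx, i32 0, i32 0, i32 1)
  %sum = add i32 %a, %b
  ret i32 %sum
}
declare i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8), i32, i32, i32, i32 immarg)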

>From e315fb2f16c27e6c82605239022694faf375e17b Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Tue, 23 Jul 2024 17:18:26 +0200
Subject: [PATCH 2/2] fixup! [AMDGPU] Add intrinsics for atomic struct buffer
 loads

---
 .../CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll  | 5 ++---
 .../AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll      | 5 ++---
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
index e300c4d2f5a15..2a979976d806c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=0 | FileCheck %s -check-prefix=CHECK
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=1 | FileCheck %s -check-prefix=CHECK
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=CHECK
+; RUN: llc -global-isel=1 -march=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=CHECK
 
 define amdgpu_kernel void @struct_atomic_buffer_load_i32(<4 x i32> %addr, i32 %index) {
 ; CHECK-LABEL: struct_atomic_buffer_load_i32:
@@ -353,7 +353,6 @@ bb2:
   ret void
 }
 
-; Function Attrs: nounwind readonly
 declare i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32, i32 immarg)
 declare i64 @llvm.amdgcn.struct.atomic.buffer.load.i64(<4 x i32>, i32, i32, i32, i32 immarg)
 declare <2 x i16> @llvm.amdgcn.struct.atomic.buffer.load.v2i16(<4 x i32>, i32, i32, i32, i32 immarg)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
index bc300c9f6c60e..70296a0a7bec6 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=0 | FileCheck %s -check-prefix=CHECK
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=1 | FileCheck %s -check-prefix=CHECK
+; RUN: llc -global-isel=0 -march=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=CHECK
+; RUN: llc -global-isel=1 -march=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=CHECK
 
 define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
 ; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32:
@@ -353,7 +353,6 @@ bb2:
   ret void
 }
 
-; Function Attrs: nounwind readonly
 declare i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8), i32, i32, i32, i32 immarg)
 declare i64 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i64(ptr addrspace(8), i32, i32, i32, i32 immarg)
 declare <2 x i16> @llvm.amdgcn.struct.ptr.atomic.buffer.load.v2i16(ptr addrspace(8), i32, i32, i32, i32 immarg)
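
For reference, the final i32 immediate in the calls above selects
the cache-policy bits documented in IntrinsicsAMDGPU.td. In the
gfx1100 tests, aux = 1 (bit 0, glc) emits the glc modifier and
aux = 4 (bit 2, dlc) emits dlc, matching the CHECK lines. A
minimal sketch (illustrative only, not part of the patch):

define void @cachepolicy_bits(<4 x i32> %rsrc, i32 %idx, ptr %out) {
  ; aux = 1 (bit 0, glc): lowers to buffer_load_b32 ... glc on gfx11.
  %g = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %rsrc, i32 %idx, i32 0, i32 0, i32 1)
  ; aux = 4 (bit 2, dlc): lowers to buffer_load_b32 ... dlc on gfx11.
  %d = call i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32> %rsrc, i32 %idx, i32 0, i32 0, i32 4)
  store volatile i32 %g, ptr %out
  store volatile i32 %d, ptr %out
  ret void
}
declare i32 @llvm.amdgcn.struct.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32, i32 immarg)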


