[llvm] [AMDGPU] Add intrinsic for raw atomic buffer loads (PR #97707)

Jessica Del via llvm-commits <llvm-commits at lists.llvm.org>
Mon Jul 8 06:59:58 PDT 2024


https://github.com/OutOfCache updated https://github.com/llvm/llvm-project/pull/97707

From 67c793210dc9b96eb97e4a580bd1655ccc77eefc Mon Sep 17 00:00:00 2001
From: rtayl <>
Date: Wed, 3 Jul 2024 19:01:41 +0200
Subject: [PATCH 1/5] [AMDGPU] Add raw.atomic.buffer.load intrinsic

This adds the llvm.amdgcn.raw.atomic.buffer.load intrinsic to support
OpAtomicLoad lowering on AMDGPU. Previously, OpAtomicLoad was lowered to
llvm.amdgcn.raw.buffer.load, which in some cases caused the load to be
marked invariant and hoisted out of the loop by LICM.
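
A minimal sketch of the intended use (the function and value names here
are hypothetical; the pattern mirrors the tests added below): polling a
buffer location in a loop. With llvm.amdgcn.raw.buffer.load the load
could be treated as invariant and hoisted by LICM; the atomic form keeps
it inside the loop.

  define amdgpu_kernel void @poll(<4 x i32> %rsrc, i32 %expected) {
  entry:
    br label %loop
  loop:
    ; aux = 1 sets glc (bit 0), so each iteration re-reads memory
    %val = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %rsrc, i32 0, i32 0, i32 1)
    %done = icmp eq i32 %val, %expected
    br i1 %done, label %exit, label %loop
  exit:
    ret void
  }
  declare i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32 immarg)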

Co-authored-by: Jay Foad <jay.foad at amd.com>
Co-authored-by: Mariusz Sikora <mariusz.sikora at amd.com>
Co-authored-by: Jessica Del <50999226+OutOfCache at users.noreply.github.com>
---
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  13 +
 .../AMDGPU/AMDGPUInstCombineIntrinsic.cpp     |   2 +
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |   1 +
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  |   1 +
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   8 +
 .../llvm.amdgcn.raw.atomic.buffer.load.ll     | 304 ++++++++++++++++++
 6 files changed, 329 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 71b1e832bde3c..52fd1dab154c9 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1116,6 +1116,19 @@ class AMDGPURawBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsi
 def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad<llvm_anyfloat_ty>;
 def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
 
+class AMDGPURawAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+  [data_ty],
+  [llvm_v4i32_ty,     // rsrc(SGPR)
+   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
+   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
+                      //                                       bit 1 = slc,
+                      //                                       bit 2 = dlc on gfx10+),
+                      //                      swizzled buffer (bit 3 = swz))
+  [ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
+  AMDGPURsrcIntrinsic<0>;
+def int_amdgcn_raw_atomic_buffer_load : AMDGPURawAtomicBufferLoad;
+
 class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [data_ty],
   [AMDGPUBufferRsrcTy,    // rsrc(SGPR)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 93bca4402ed23..ac72e29ba6675 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1245,6 +1245,7 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
       unsigned OffsetIdx;
       switch (II.getIntrinsicID()) {
       case Intrinsic::amdgcn_raw_buffer_load:
+      case Intrinsic::amdgcn_raw_atomic_buffer_load:
       case Intrinsic::amdgcn_raw_ptr_buffer_load:
         OffsetIdx = 1;
         break;
@@ -1378,6 +1379,7 @@ std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
   case Intrinsic::amdgcn_raw_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_buffer_load_format:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
+  case Intrinsic::amdgcn_raw_atomic_buffer_load:
   case Intrinsic::amdgcn_raw_tbuffer_load:
   case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
   case Intrinsic::amdgcn_s_buffer_load:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index a219d01518458..16665fb453e33 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -7345,6 +7345,7 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
     return legalizeBufferStore(MI, MRI, B, true, true);
   case Intrinsic::amdgcn_raw_buffer_load:
   case Intrinsic::amdgcn_raw_ptr_buffer_load:
+  case Intrinsic::amdgcn_raw_atomic_buffer_load:
   case Intrinsic::amdgcn_struct_buffer_load:
   case Intrinsic::amdgcn_struct_ptr_buffer_load:
     return legalizeBufferLoad(MI, MRI, B, false, false);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 9e7694f41d6b8..efe14e723c4a0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4984,6 +4984,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     }
     case Intrinsic::amdgcn_raw_buffer_load:
     case Intrinsic::amdgcn_raw_ptr_buffer_load:
+    case Intrinsic::amdgcn_raw_atomic_buffer_load:
     case Intrinsic::amdgcn_raw_tbuffer_load:
     case Intrinsic::amdgcn_raw_ptr_tbuffer_load: {
       // FIXME: Should make intrinsic ID the last operand of the instruction,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 12977af0d7e85..257b23463e58b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1272,6 +1272,13 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
         Info.ptrVal = CI.getArgOperand(1);
         return true;
       }
+      case Intrinsic::amdgcn_raw_atomic_buffer_load: {
+        Info.memVT =
+            memVTFromLoadIntrReturn(*this, MF.getDataLayout(), CI.getType(),
+                                    std::numeric_limits<unsigned>::max());
+        Info.flags &= ~MachineMemOperand::MOStore;
+        return true;
+      }
       }
     }
     return true;
@@ -8897,6 +8904,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   }
   case Intrinsic::amdgcn_raw_buffer_load:
   case Intrinsic::amdgcn_raw_ptr_buffer_load:
+  case Intrinsic::amdgcn_raw_atomic_buffer_load:
   case Intrinsic::amdgcn_raw_buffer_load_format:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_format: {
     const bool IsFormat =
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
new file mode 100644
index 0000000000000..44f1089e4554b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
@@ -0,0 +1,304 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=0 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=1 | FileCheck %s -check-prefix=CHECK
+
+define amdgpu_kernel void @raw_atomic_buffer_load_i32(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB0_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB0_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_i32_off(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i32_off:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB1_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB1_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+define amdgpu_kernel void @raw_atomic_buffer_load_i32_soff(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i32_soff:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB2_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB2_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 4, i32 4, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+define amdgpu_kernel void @raw_atomic_buffer_load_i32_dlc(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i32_dlc:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB3_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB3_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 4, i32 0, i32 4)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_nonatomic_buffer_load_i32(<4 x i32> %addr) {
+; CHECK-LABEL: raw_nonatomic_buffer_load_i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_mov_b32 s0, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:  .LBB4_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_and_b32 s1, exec_lo, vcc_lo
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; CHECK-NEXT:    s_or_b32 s0, s1, s0
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s0
+; CHECK-NEXT:    s_cbranch_execnz .LBB4_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %addr, i32 4, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_i64(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i64:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB5_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB5_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %id.zext = zext i32 %id to i64
+  br label %bb1
+bb1:
+  %load = call i64 @llvm.amdgcn.raw.atomic.buffer.load.i64(<4 x i32> %addr, i32 4, i32 0, i32 1)
+  %cmp = icmp eq i64 %load, %id.zext
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_v2i16(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_v2i16:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB6_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB6_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <2 x i16> @llvm.amdgcn.raw.atomic.buffer.load.v2i16(<4 x i32> %addr, i32 0, i32 0, i32 1)
+  %bitcast = bitcast <2 x i16> %load to i32
+  %cmp = icmp eq i32 %bitcast, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_v4i16:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB7_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT:    v_lshl_or_b32 v1, v2, 16, v1
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB7_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <4 x i16> @llvm.amdgcn.raw.atomic.buffer.load.v4i16(<4 x i32> %addr, i32 4, i32 0, i32 1)
+  %shortened = shufflevector <4 x i16> %load, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
+  %bitcast = bitcast <2 x i16> %shortened to i32
+  %cmp = icmp eq i32 %bitcast, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_v4i32(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_v4i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB8_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v4, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB8_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <4 x i32> @llvm.amdgcn.raw.atomic.buffer.load.v4i32(<4 x i32> %addr, i32 4, i32 0, i32 1)
+  %extracted = extractelement <4 x i32> %load, i32 3
+  %cmp = icmp eq i32 %extracted, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_ptr(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_ptr:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB9_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    flat_load_b32 v1, v[1:2]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB9_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call ptr @llvm.amdgcn.raw.atomic.buffer.load.ptr(<4 x i32> %addr, i32 4, i32 0, i32 1)
+  %elem = load i32, ptr %load
+  %cmp = icmp eq i32 %elem, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32 immarg)
+declare i64 @llvm.amdgcn.raw.atomic.buffer.load.i64(<4 x i32>, i32, i32, i32 immarg)
+declare <2 x i16> @llvm.amdgcn.raw.atomic.buffer.load.v2i16(<4 x i32>, i32, i32, i32 immarg)
+declare <4 x i16> @llvm.amdgcn.raw.atomic.buffer.load.v4i16(<4 x i32>, i32, i32, i32 immarg)
+declare <4 x i32> @llvm.amdgcn.raw.atomic.buffer.load.v4i32(<4 x i32>, i32, i32, i32 immarg)
+declare ptr @llvm.amdgcn.raw.atomic.buffer.load.ptr(<4 x i32>, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32>, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.workitem.id.x()

From 198b3601e2fca00e5106850f2fcaca8a0090b09f Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Wed, 3 Jul 2024 21:10:26 +0200
Subject: [PATCH 2/5] [AMDGPU] Add llvm.amdgcn.raw.atomic.ptr.buffer.load

Add a "ptr" form of the downstream-only
llvm.amdgcn.raw.atomic.buffer.load intrinsic. For upstream intrinsics
this was done by:
https://reviews.llvm.org/D147547 "[AMDGPU] Add buffer intrinsics that take resources as pointers"
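
A minimal sketch of the new "ptr" form (hypothetical names, mirroring
the test added below); it takes the resource as a ptr addrspace(8)
instead of a <4 x i32>:

  define amdgpu_kernel void @poll_ptr(ptr addrspace(8) %rsrc, i32 %expected) {
  entry:
    br label %loop
  loop:
    ; aux = 1 sets glc (bit 0), as in the v4i32 form
    %val = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 1)
    %done = icmp eq i32 %val, %expected
    br i1 %done, label %exit, label %loop
  exit:
    ret void
  }
  declare i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)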

Co-authored-by: Jessica Del <50999226+OutOfCache at users.noreply.github.com>
---
 llvm/include/llvm/IR/IntrinsicsAMDGPU.td      |  13 +
 .../AMDGPU/AMDGPUInstCombineIntrinsic.cpp     |   2 +
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |   1 +
 .../Target/AMDGPU/AMDGPURegisterBankInfo.cpp  |   1 +
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   4 +-
 .../llvm.amdgcn.raw.atomic.ptr.buffer.load.ll | 304 ++++++++++++++++++
 6 files changed, 324 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 52fd1dab154c9..dc53e9e778a4f 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1147,6 +1147,19 @@ class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntri
 def int_amdgcn_raw_ptr_buffer_load_format : AMDGPURawPtrBufferLoad<llvm_anyfloat_ty>;
 def int_amdgcn_raw_ptr_buffer_load : AMDGPURawPtrBufferLoad;
 
+class AMDGPURawAtomicPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+  [data_ty],
+  [AMDGPUBufferRsrcTy,// rsrc(SGPR)
+   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
+   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
+                      //                                       bit 1 = slc,
+                      //                                       bit 2 = dlc on gfx10+),
+                      //                      swizzled buffer (bit 3 = swz))
+  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
+  AMDGPURsrcIntrinsic<0>;
+def int_amdgcn_raw_atomic_ptr_buffer_load : AMDGPURawAtomicPtrBufferLoad;
+
 class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [data_ty],
   [llvm_v4i32_ty,    // rsrc(SGPR)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index ac72e29ba6675..11aac1f22524d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1247,6 +1247,7 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
       case Intrinsic::amdgcn_raw_buffer_load:
       case Intrinsic::amdgcn_raw_atomic_buffer_load:
       case Intrinsic::amdgcn_raw_ptr_buffer_load:
+      case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
         OffsetIdx = 1;
         break;
       case Intrinsic::amdgcn_s_buffer_load:
@@ -1380,6 +1381,7 @@ std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
   case Intrinsic::amdgcn_raw_buffer_load_format:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
   case Intrinsic::amdgcn_raw_atomic_buffer_load:
+  case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_tbuffer_load:
   case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
   case Intrinsic::amdgcn_s_buffer_load:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 16665fb453e33..9411689d2d858 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -7346,6 +7346,7 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
   case Intrinsic::amdgcn_raw_buffer_load:
   case Intrinsic::amdgcn_raw_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_atomic_buffer_load:
+  case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
   case Intrinsic::amdgcn_struct_buffer_load:
   case Intrinsic::amdgcn_struct_ptr_buffer_load:
     return legalizeBufferLoad(MI, MRI, B, false, false);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index efe14e723c4a0..f497295d8b5e5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4985,6 +4985,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     case Intrinsic::amdgcn_raw_buffer_load:
     case Intrinsic::amdgcn_raw_ptr_buffer_load:
     case Intrinsic::amdgcn_raw_atomic_buffer_load:
+    case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
     case Intrinsic::amdgcn_raw_tbuffer_load:
     case Intrinsic::amdgcn_raw_ptr_tbuffer_load: {
       // FIXME: Should make intrinsic ID the last operand of the instruction,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 257b23463e58b..c1c320173ae9a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1272,7 +1272,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
         Info.ptrVal = CI.getArgOperand(1);
         return true;
       }
-      case Intrinsic::amdgcn_raw_atomic_buffer_load: {
+      case Intrinsic::amdgcn_raw_atomic_buffer_load:
+      case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load: {
         Info.memVT =
             memVTFromLoadIntrReturn(*this, MF.getDataLayout(), CI.getType(),
                                     std::numeric_limits<unsigned>::max());
@@ -8905,6 +8906,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
   case Intrinsic::amdgcn_raw_buffer_load:
   case Intrinsic::amdgcn_raw_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_atomic_buffer_load:
+  case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_buffer_load_format:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_format: {
     const bool IsFormat =
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll
new file mode 100644
index 0000000000000..987aa43aa4242
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll
@@ -0,0 +1,304 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=0 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=1 | FileCheck %s -check-prefix=CHECK
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_ptr_load_i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_ptr_load_i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB0_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB0_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_off(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_off:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB1_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB1_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_soff(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_soff:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB2_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB2_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 4, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_dlc(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_dlc:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB3_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB3_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 4)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_nonatomic_ptr_buffer_load_i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_nonatomic_ptr_buffer_load_i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_mov_b32 s0, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:  .LBB4_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_and_b32 s1, exec_lo, vcc_lo
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; CHECK-NEXT:    s_or_b32 s0, s1, s0
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s0
+; CHECK-NEXT:    s_cbranch_execnz .LBB4_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+  %cmp = icmp eq i32 %load, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i64(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_i64:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB5_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB5_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %id.zext = zext i32 %id to i64
+  br label %bb1
+bb1:
+  %load = call i64 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i64(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+  %cmp = icmp eq i64 %load, %id.zext
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v2i16(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_v2i16:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB6_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB6_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <2 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v2i16(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+  %bitcast = bitcast <2 x i16> %load to i32
+  %cmp = icmp eq i32 %bitcast, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v4i16(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_v4i16:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB7_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v1, 0xffff, v1
+; CHECK-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT:    v_lshl_or_b32 v1, v2, 16, v1
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB7_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <4 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i16(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+  %shortened = shufflevector <4 x i16> %load, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
+  %bitcast = bitcast <2 x i16> %shortened to i32
+  %cmp = icmp eq i32 %bitcast, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v4i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_v4i32:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB8_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v4, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB8_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call <4 x i32> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+  %extracted = extractelement <4 x i32> %load, i32 3
+  %cmp = icmp eq i32 %extracted, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_ptr(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_ptr:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  .LBB9_1: ; %bb1
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    flat_load_b32 v1, v[1:2]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT:    s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT:    s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT:    s_cbranch_execnz .LBB9_1
+; CHECK-NEXT:  ; %bb.2: ; %bb2
+; CHECK-NEXT:    s_endpgm
+bb:
+  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+  br label %bb1
+bb1:
+  %load = call ptr @llvm.amdgcn.raw.atomic.ptr.buffer.load.ptr(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+  %elem = load i32, ptr %load
+  %cmp = icmp eq i32 %elem, %id
+  br i1 %cmp, label %bb1, label %bb2
+bb2:
+  ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)
+declare i64 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i64(ptr addrspace(8), i32, i32, i32 immarg)
+declare <2 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v2i16(ptr addrspace(8), i32, i32, i32 immarg)
+declare <4 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i16(ptr addrspace(8), i32, i32, i32 immarg)
+declare <4 x i32> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i32(ptr addrspace(8), i32, i32, i32 immarg)
+declare ptr @llvm.amdgcn.raw.atomic.ptr.buffer.load.ptr(ptr addrspace(8), i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.workitem.id.x()

From e96a14ee4f297a2c5d091387bda4b1738f354710 Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Mon, 8 Jul 2024 11:32:30 +0200
Subject: [PATCH 3/5] fixup! [AMDGPU] Add raw.atomic.buffer.load intrinsic

---
 llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 11aac1f22524d..33d86ac52a777 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1245,7 +1245,6 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
       unsigned OffsetIdx;
       switch (II.getIntrinsicID()) {
       case Intrinsic::amdgcn_raw_buffer_load:
-      case Intrinsic::amdgcn_raw_atomic_buffer_load:
       case Intrinsic::amdgcn_raw_ptr_buffer_load:
       case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
         OffsetIdx = 1;
@@ -1380,7 +1379,6 @@ std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
   case Intrinsic::amdgcn_raw_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_buffer_load_format:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
-  case Intrinsic::amdgcn_raw_atomic_buffer_load:
   case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_tbuffer_load:
   case Intrinsic::amdgcn_raw_ptr_tbuffer_load:

From d939607d75f51ef91adbf46efc9bd00cd0d93666 Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Mon, 8 Jul 2024 11:36:09 +0200
Subject: [PATCH 4/5] fixup! [AMDGPU] Add
 llvm.amdgcn.raw.atomic.ptr.buffer.load

---
 llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 33d86ac52a777..93bca4402ed23 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1246,7 +1246,6 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
       switch (II.getIntrinsicID()) {
       case Intrinsic::amdgcn_raw_buffer_load:
       case Intrinsic::amdgcn_raw_ptr_buffer_load:
-      case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
         OffsetIdx = 1;
         break;
       case Intrinsic::amdgcn_s_buffer_load:
@@ -1379,7 +1378,6 @@ std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
   case Intrinsic::amdgcn_raw_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_buffer_load_format:
   case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
-  case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
   case Intrinsic::amdgcn_raw_tbuffer_load:
   case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
   case Intrinsic::amdgcn_s_buffer_load:

From 93b6b5b776667997fcea77ec9b52c72c2b5b3ff5 Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Mon, 8 Jul 2024 13:43:31 +0200
Subject: [PATCH 5/5] fixup! [AMDGPU] Add
 llvm.amdgcn.raw.atomic.ptr.buffer.load

---
 llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp     | 5 +++--
 .../test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll | 6 +++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index 470180f2bcd28..b5e35dfedd3ba 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -1092,8 +1092,9 @@ Value *SplitPtrStructs::handleMemoryInst(Instruction *I, Value *Arg, Value *Ptr,
 
   Intrinsic::ID IID = Intrinsic::not_intrinsic;
   if (isa<LoadInst>(I))
-    // TODO: Do we need to do something about atomic loads?
-    IID = Intrinsic::amdgcn_raw_ptr_buffer_load;
+    IID = Order == AtomicOrdering::NotAtomic
+              ? Intrinsic::amdgcn_raw_ptr_buffer_load
+              : Intrinsic::amdgcn_raw_atomic_ptr_buffer_load;
   else if (isa<StoreInst>(I))
     IID = Intrinsic::amdgcn_raw_ptr_buffer_store;
   else if (auto *RMW = dyn_cast<AtomicRMWInst>(I)) {
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
index 66c68f7cc731e..8949d146266a9 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
@@ -17,10 +17,10 @@ define void @loads(ptr addrspace(8) %buf) {
 ; CHECK-NEXT:    [[VOLATILE:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483648)
 ; CHECK-NEXT:    [[VOLATILE_NONTEMPORAL:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483646), !nontemporal [[META0]]
 ; CHECK-NEXT:    fence syncscope("wavefront") release
-; CHECK-NEXT:    [[ATOMIC:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483647)
+; CHECK-NEXT:    [[ATOMIC:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483647)
 ; CHECK-NEXT:    fence syncscope("wavefront") acquire
-; CHECK-NEXT:    [[ATOMIC_MONOTONIC:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
-; CHECK-NEXT:    [[ATOMIC_ACQUIRE:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
+; CHECK-NEXT:    [[ATOMIC_MONOTONIC:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
+; CHECK-NEXT:    [[ATOMIC_ACQUIRE:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
 ; CHECK-NEXT:    fence acquire
 ; CHECK-NEXT:    ret void
 ;
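
A sketch of the behavior this fixup changes (the function name is
hypothetical; the resulting call is what the updated test above checks):
an atomic load through a buffer fat pointer (addrspace(7)) now selects
the atomic intrinsic instead of the plain one.

  ; Input to AMDGPULowerBufferFatPointers:
  define float @atomic_load_fat_ptr(ptr addrspace(7) %p) {
    %v = load atomic float, ptr addrspace(7) %p monotonic, align 4
    ret float %v
  }
  ; After lowering, the load becomes a call such as:
  ;   %v = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) %rsrc, i32 %off, i32 0, i32 1)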


