[llvm] [AMDGPU] Add intrinsic for raw atomic buffer loads (PR #97707)
Jessica Del via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 22 07:25:55 PDT 2024
https://github.com/OutOfCache updated https://github.com/llvm/llvm-project/pull/97707
>From 67c793210dc9b96eb97e4a580bd1655ccc77eefc Mon Sep 17 00:00:00 2001
From: rtayl <>
Date: Wed, 3 Jul 2024 19:01:41 +0200
Subject: [PATCH 1/9] [AMDGPU] Add raw.atomic.buffer.load intrinsic
This adds the llvm.amdgcn.raw.atomic.buffer.load intrinsic to support
OpAtomicLoad lowering on AMDGPU. Previously this was lowered to
llvm.amdgcn.raw.buffer.load, which in some cases caused the load
to be marked as invariant and hoisted out of loops by LICM.
Co-authored-by: Jay Foad <jay.foad at amd.com>
Co-authored-by: Mariusz Sikora <mariusz.sikora at amd.com>
Co-authored-by: Jessica Del <50999226+OutOfCache at users.noreply.github.com>
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 13 +
.../AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 2 +
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 1 +
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 1 +
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 8 +
.../llvm.amdgcn.raw.atomic.buffer.load.ll | 304 ++++++++++++++++++
6 files changed, 329 insertions(+)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 71b1e832bde3c..52fd1dab154c9 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1116,6 +1116,19 @@ class AMDGPURawBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsi
def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
+class AMDGPURawAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+ [data_ty],
+ [llvm_v4i32_ty, // rsrc(SGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
+ [ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<0>;
+def int_amdgcn_raw_atomic_buffer_load : AMDGPURawAtomicBufferLoad;
+
class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
[data_ty],
[AMDGPUBufferRsrcTy, // rsrc(SGPR)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 93bca4402ed23..ac72e29ba6675 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1245,6 +1245,7 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
unsigned OffsetIdx;
switch (II.getIntrinsicID()) {
case Intrinsic::amdgcn_raw_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
OffsetIdx = 1;
break;
@@ -1378,6 +1379,7 @@ std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_buffer_load_format:
case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
+ case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_raw_tbuffer_load:
case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
case Intrinsic::amdgcn_s_buffer_load:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index a219d01518458..16665fb453e33 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -7345,6 +7345,7 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
return legalizeBufferStore(MI, MRI, B, true, true);
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_struct_buffer_load:
case Intrinsic::amdgcn_struct_ptr_buffer_load:
return legalizeBufferLoad(MI, MRI, B, false, false);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 9e7694f41d6b8..efe14e723c4a0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4984,6 +4984,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_raw_tbuffer_load:
case Intrinsic::amdgcn_raw_ptr_tbuffer_load: {
// FIXME: Should make intrinsic ID the last operand of the instruction,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 12977af0d7e85..257b23463e58b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1272,6 +1272,13 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.ptrVal = CI.getArgOperand(1);
return true;
}
+ case Intrinsic::amdgcn_raw_atomic_buffer_load: {
+ Info.memVT =
+ memVTFromLoadIntrReturn(*this, MF.getDataLayout(), CI.getType(),
+ std::numeric_limits<unsigned>::max());
+ Info.flags &= ~MachineMemOperand::MOStore;
+ return true;
+ }
}
}
return true;
@@ -8897,6 +8904,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
}
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_raw_buffer_load_format:
case Intrinsic::amdgcn_raw_ptr_buffer_load_format: {
const bool IsFormat =
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
new file mode 100644
index 0000000000000..44f1089e4554b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
@@ -0,0 +1,304 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=0 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=1 | FileCheck %s -check-prefix=CHECK
+
+define amdgpu_kernel void @raw_atomic_buffer_load_i32(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i32:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB0_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB0_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 0, i32 0, i32 1)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_i32_off(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i32_off:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB1_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB1_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 0, i32 0, i32 1)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+define amdgpu_kernel void @raw_atomic_buffer_load_i32_soff(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i32_soff:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB2_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB2_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 4, i32 4, i32 1)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+define amdgpu_kernel void @raw_atomic_buffer_load_i32_dlc(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i32_dlc:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB3_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB3_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 4, i32 0, i32 4)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_nonatomic_buffer_load_i32(<4 x i32> %addr) {
+; CHECK-LABEL: raw_nonatomic_buffer_load_i32:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_mov_b32 s0, 0
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: .LBB4_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; CHECK-NEXT: s_or_b32 s0, s1, s0
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; CHECK-NEXT: s_cbranch_execnz .LBB4_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32> %addr, i32 4, i32 0, i32 1)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_i64(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_i64:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB5_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB5_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %id.zext = zext i32 %id to i64
+ br label %bb1
+bb1:
+ %load = call i64 @llvm.amdgcn.raw.atomic.buffer.load.i64(<4 x i32> %addr, i32 4, i32 0, i32 1)
+ %cmp = icmp eq i64 %load, %id.zext
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_v2i16(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_v2i16:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB6_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB6_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call <2 x i16> @llvm.amdgcn.raw.atomic.buffer.load.v2i16(<4 x i32> %addr, i32 0, i32 0, i32 1)
+ %bitcast = bitcast <2 x i16> %load to i32
+ %cmp = icmp eq i32 %bitcast, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_v4i16:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB7_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB7_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call <4 x i16> @llvm.amdgcn.raw.atomic.buffer.load.v4i16(<4 x i32> %addr, i32 4, i32 0, i32 1)
+ %shortened = shufflevector <4 x i16> %load, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
+ %bitcast = bitcast <2 x i16> %shortened to i32
+ %cmp = icmp eq i32 %bitcast, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_v4i32(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_v4i32:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB8_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB8_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call <4 x i32> @llvm.amdgcn.raw.atomic.buffer.load.v4i32(<4 x i32> %addr, i32 4, i32 0, i32 1)
+ %extracted = extractelement <4 x i32> %load, i32 3
+ %cmp = icmp eq i32 %extracted, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_buffer_load_ptr(<4 x i32> %addr) {
+; CHECK-LABEL: raw_atomic_buffer_load_ptr:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB9_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_load_b32 v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB9_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call ptr @llvm.amdgcn.raw.atomic.buffer.load.ptr(<4 x i32> %addr, i32 4, i32 0, i32 1)
+ %elem = load i32, ptr %load
+ %cmp = icmp eq i32 %elem, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32 immarg)
+declare i64 @llvm.amdgcn.raw.atomic.buffer.load.i64(<4 x i32>, i32, i32, i32 immarg)
+declare <2 x i16> @llvm.amdgcn.raw.atomic.buffer.load.v2i16(<4 x i32>, i32, i32, i32 immarg)
+declare <4 x i16> @llvm.amdgcn.raw.atomic.buffer.load.v4i16(<4 x i32>, i32, i32, i32 immarg)
+declare <4 x i32> @llvm.amdgcn.raw.atomic.buffer.load.v4i32(<4 x i32>, i32, i32, i32 immarg)
+declare ptr @llvm.amdgcn.raw.atomic.buffer.load.ptr(<4 x i32>, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.raw.buffer.load.i32(<4 x i32>, i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.workitem.id.x()
>From 98c54990a28026a81bacb7cdf96688e3d739524d Mon Sep 17 00:00:00 2001
From: Jay Foad <jay.foad at amd.com>
Date: Wed, 3 Jul 2024 21:10:26 +0200
Subject: [PATCH 2/9] [AMDGPU] Add llvm.amdgcn.raw.ptr.atomic.buffer.load
Add a "ptr" form of the downstream-only
llvm.amdgcn.raw.atomic.buffer.load intrinsic. For upstream intrinsics
this was done by:
https://reviews.llvm.org/D147547 "[AMDGPU] Add buffer intrinsics that take resources as pointers"
Co-authored-by: Jessica Del <50999226+OutOfCache at users.noreply.github.com>
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 13 +
.../AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 2 +
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 1 +
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 1 +
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 +-
.../llvm.amdgcn.raw.atomic.ptr.buffer.load.ll | 304 ++++++++++++++++++
6 files changed, 324 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 52fd1dab154c9..dc53e9e778a4f 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1147,6 +1147,19 @@ class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntri
def int_amdgcn_raw_ptr_buffer_load_format : AMDGPURawPtrBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_ptr_buffer_load : AMDGPURawPtrBufferLoad;
+class AMDGPURawAtomicPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+ [data_ty],
+ [AMDGPUBufferRsrcTy,// rsrc(SGPR)
+ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
+ llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling)
+ llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
+ // bit 1 = slc,
+ // bit 2 = dlc on gfx10+),
+ // swizzled buffer (bit 3 = swz))
+ [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
+ AMDGPURsrcIntrinsic<0>;
+def int_amdgcn_raw_atomic_ptr_buffer_load : AMDGPURawAtomicPtrBufferLoad;
+
class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
[data_ty],
[llvm_v4i32_ty, // rsrc(SGPR)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index ac72e29ba6675..11aac1f22524d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1247,6 +1247,7 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
OffsetIdx = 1;
break;
case Intrinsic::amdgcn_s_buffer_load:
@@ -1380,6 +1381,7 @@ std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
case Intrinsic::amdgcn_raw_buffer_load_format:
case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
case Intrinsic::amdgcn_raw_atomic_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
case Intrinsic::amdgcn_raw_tbuffer_load:
case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
case Intrinsic::amdgcn_s_buffer_load:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 16665fb453e33..9411689d2d858 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -7346,6 +7346,7 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_atomic_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
case Intrinsic::amdgcn_struct_buffer_load:
case Intrinsic::amdgcn_struct_ptr_buffer_load:
return legalizeBufferLoad(MI, MRI, B, false, false);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index efe14e723c4a0..f497295d8b5e5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4985,6 +4985,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_atomic_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
case Intrinsic::amdgcn_raw_tbuffer_load:
case Intrinsic::amdgcn_raw_ptr_tbuffer_load: {
// FIXME: Should make intrinsic ID the last operand of the instruction,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 257b23463e58b..c1c320173ae9a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1272,7 +1272,8 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
Info.ptrVal = CI.getArgOperand(1);
return true;
}
- case Intrinsic::amdgcn_raw_atomic_buffer_load: {
+ case Intrinsic::amdgcn_raw_atomic_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load: {
Info.memVT =
memVTFromLoadIntrReturn(*this, MF.getDataLayout(), CI.getType(),
std::numeric_limits<unsigned>::max());
@@ -8905,6 +8906,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_atomic_buffer_load:
+ case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
case Intrinsic::amdgcn_raw_buffer_load_format:
case Intrinsic::amdgcn_raw_ptr_buffer_load_format: {
const bool IsFormat =
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll
new file mode 100644
index 0000000000000..987aa43aa4242
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll
@@ -0,0 +1,304 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=0 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=1 | FileCheck %s -check-prefix=CHECK
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_ptr_load_i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_ptr_load_i32:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB0_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB0_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_off(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_off:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB1_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB1_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_soff(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_soff:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB2_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB2_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 4, i32 1)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_dlc(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_dlc:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB3_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB3_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 4)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_nonatomic_ptr_buffer_load_i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_nonatomic_ptr_buffer_load_i32:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_mov_b32 s0, 0
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: .LBB4_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; CHECK-NEXT: s_or_b32 s0, s1, s0
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; CHECK-NEXT: s_cbranch_execnz .LBB4_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %cmp = icmp eq i32 %load, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i64(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_i64:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB5_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB5_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %id.zext = zext i32 %id to i64
+ br label %bb1
+bb1:
+ %load = call i64 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i64(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %cmp = icmp eq i64 %load, %id.zext
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v2i16(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_v2i16:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB6_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB6_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call <2 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v2i16(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+ %bitcast = bitcast <2 x i16> %load to i32
+ %cmp = icmp eq i32 %bitcast, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v4i16(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_v4i16:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB7_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB7_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call <4 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i16(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %shortened = shufflevector <4 x i16> %load, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
+ %bitcast = bitcast <2 x i16> %shortened to i32
+ %cmp = icmp eq i32 %bitcast, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v4i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_v4i32:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB8_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB8_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call <4 x i32> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %extracted = extractelement <4 x i32> %load, i32 3
+ %cmp = icmp eq i32 %extracted, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+define amdgpu_kernel void @raw_atomic_ptr_buffer_load_ptr(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_atomic_ptr_buffer_load_ptr:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
+; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: .LBB9_1: ; %bb1
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: flat_load_b32 v1, v[1:2]
+; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; CHECK-NEXT: s_cbranch_execnz .LBB9_1
+; CHECK-NEXT: ; %bb.2: ; %bb2
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = tail call i32 @llvm.amdgcn.workitem.id.x()
+ br label %bb1
+bb1:
+ %load = call ptr @llvm.amdgcn.raw.atomic.ptr.buffer.load.ptr(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %elem = load i32, ptr %load
+ %cmp = icmp eq i32 %elem, %id
+ br i1 %cmp, label %bb1, label %bb2
+bb2:
+ ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)
+declare i64 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i64(ptr addrspace(8), i32, i32, i32 immarg)
+declare <2 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v2i16(ptr addrspace(8), i32, i32, i32 immarg)
+declare <4 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i16(ptr addrspace(8), i32, i32, i32 immarg)
+declare <4 x i32> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i32(ptr addrspace(8), i32, i32, i32 immarg)
+declare ptr @llvm.amdgcn.raw.atomic.ptr.buffer.load.ptr(ptr addrspace(8), i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.workitem.id.x()
>From 2d84d4115c28b750239adc27a570f4e50052f805 Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Mon, 8 Jul 2024 11:32:30 +0200
Subject: [PATCH 3/9] fixup! [AMDGPU] Add raw.atomic.buffer.load intrinsic
---
llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 2 --
1 file changed, 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 11aac1f22524d..33d86ac52a777 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1245,7 +1245,6 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
unsigned OffsetIdx;
switch (II.getIntrinsicID()) {
case Intrinsic::amdgcn_raw_buffer_load:
- case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
OffsetIdx = 1;
@@ -1380,7 +1379,6 @@ std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_buffer_load_format:
case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
- case Intrinsic::amdgcn_raw_atomic_buffer_load:
case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
case Intrinsic::amdgcn_raw_tbuffer_load:
case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
>From 88311cf85aae019b9893a340cefc636d829835bd Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Mon, 8 Jul 2024 11:36:09 +0200
Subject: [PATCH 4/9] fixup! [AMDGPU] Add
llvm.amdgcn.raw.atomic.ptr.buffer.load
---
llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 2 --
1 file changed, 2 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 33d86ac52a777..93bca4402ed23 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1246,7 +1246,6 @@ static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
switch (II.getIntrinsicID()) {
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
- case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
OffsetIdx = 1;
break;
case Intrinsic::amdgcn_s_buffer_load:
@@ -1379,7 +1378,6 @@ std::optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_buffer_load_format:
case Intrinsic::amdgcn_raw_ptr_buffer_load_format:
- case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
case Intrinsic::amdgcn_raw_tbuffer_load:
case Intrinsic::amdgcn_raw_ptr_tbuffer_load:
case Intrinsic::amdgcn_s_buffer_load:
>From 18fc7ab6072f1e1f5af5e51b59240a75adfde4ce Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Mon, 8 Jul 2024 13:43:31 +0200
Subject: [PATCH 5/9] fixup! [AMDGPU] Add
llvm.amdgcn.raw.atomic.ptr.buffer.load
---
llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp | 5 +++--
.../test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll | 6 +++---
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index 470180f2bcd28..b5e35dfedd3ba 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -1092,8 +1092,9 @@ Value *SplitPtrStructs::handleMemoryInst(Instruction *I, Value *Arg, Value *Ptr,
Intrinsic::ID IID = Intrinsic::not_intrinsic;
if (isa<LoadInst>(I))
- // TODO: Do we need to do something about atomic loads?
- IID = Intrinsic::amdgcn_raw_ptr_buffer_load;
+ IID = Order == AtomicOrdering::NotAtomic
+ ? Intrinsic::amdgcn_raw_ptr_buffer_load
+ : Intrinsic::amdgcn_raw_atomic_ptr_buffer_load;
else if (isa<StoreInst>(I))
IID = Intrinsic::amdgcn_raw_ptr_buffer_store;
else if (auto *RMW = dyn_cast<AtomicRMWInst>(I)) {
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
index 66c68f7cc731e..8949d146266a9 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
@@ -17,10 +17,10 @@ define void @loads(ptr addrspace(8) %buf) {
; CHECK-NEXT: [[VOLATILE:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483648)
; CHECK-NEXT: [[VOLATILE_NONTEMPORAL:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483646), !nontemporal [[META0]]
; CHECK-NEXT: fence syncscope("wavefront") release
-; CHECK-NEXT: [[ATOMIC:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483647)
+; CHECK-NEXT: [[ATOMIC:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483647)
; CHECK-NEXT: fence syncscope("wavefront") acquire
-; CHECK-NEXT: [[ATOMIC_MONOTONIC:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
-; CHECK-NEXT: [[ATOMIC_ACQUIRE:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
+; CHECK-NEXT: [[ATOMIC_MONOTONIC:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
+; CHECK-NEXT: [[ATOMIC_ACQUIRE:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
; CHECK-NEXT: fence acquire
; CHECK-NEXT: ret void
;
>From 551a6f8ada3dccd1144d8d351ba1281503a979ef Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Thu, 11 Jul 2024 19:56:08 +0200
Subject: [PATCH 6/9] fixup! [AMDGPU] Add
llvm.amdgcn.raw.atomic.ptr.buffer.load
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 4 +-
.../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 2 +-
.../AMDGPU/AMDGPULowerBufferFatPointers.cpp | 2 +-
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 2 +-
llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 +-
...llvm.amdgcn.raw.ptr.atomic.buffer.load.ll} | 70 +++++++++----------
6 files changed, 42 insertions(+), 42 deletions(-)
rename llvm/test/CodeGen/AMDGPU/{llvm.amdgcn.raw.atomic.ptr.buffer.load.ll => llvm.amdgcn.raw.ptr.atomic.buffer.load.ll} (80%)
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index dc53e9e778a4f..4cb4d24d53498 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1147,7 +1147,7 @@ class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntri
def int_amdgcn_raw_ptr_buffer_load_format : AMDGPURawPtrBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_ptr_buffer_load : AMDGPURawPtrBufferLoad;
-class AMDGPURawAtomicPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
+class AMDGPURawPtrAtomicBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
[data_ty],
[AMDGPUBufferRsrcTy,// rsrc(SGPR)
llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling)
@@ -1158,7 +1158,7 @@ class AMDGPURawAtomicPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
// swizzled buffer (bit 3 = swz))
[IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
AMDGPURsrcIntrinsic<0>;
-def int_amdgcn_raw_atomic_ptr_buffer_load : AMDGPURawAtomicPtrBufferLoad;
+def int_amdgcn_raw_ptr_atomic_buffer_load : AMDGPURawPtrAtomicBufferLoad;
class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
[data_ty],
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 9411689d2d858..c7cd69fe54b37 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -7346,7 +7346,7 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_atomic_buffer_load:
- case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
+ case Intrinsic::amdgcn_raw_ptr_atomic_buffer_load:
case Intrinsic::amdgcn_struct_buffer_load:
case Intrinsic::amdgcn_struct_ptr_buffer_load:
return legalizeBufferLoad(MI, MRI, B, false, false);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
index b5e35dfedd3ba..e9f251e83b37c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerBufferFatPointers.cpp
@@ -1094,7 +1094,7 @@ Value *SplitPtrStructs::handleMemoryInst(Instruction *I, Value *Arg, Value *Ptr,
if (isa<LoadInst>(I))
IID = Order == AtomicOrdering::NotAtomic
? Intrinsic::amdgcn_raw_ptr_buffer_load
- : Intrinsic::amdgcn_raw_atomic_ptr_buffer_load;
+ : Intrinsic::amdgcn_raw_ptr_atomic_buffer_load;
else if (isa<StoreInst>(I))
IID = Intrinsic::amdgcn_raw_ptr_buffer_store;
else if (auto *RMW = dyn_cast<AtomicRMWInst>(I)) {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index f497295d8b5e5..3f91fcbc877d7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4985,7 +4985,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_atomic_buffer_load:
- case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
+ case Intrinsic::amdgcn_raw_ptr_atomic_buffer_load:
case Intrinsic::amdgcn_raw_tbuffer_load:
case Intrinsic::amdgcn_raw_ptr_tbuffer_load: {
// FIXME: Should make intrinsic ID the last operand of the instruction,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index c1c320173ae9a..31be41f047ef0 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1273,7 +1273,7 @@ bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
return true;
}
case Intrinsic::amdgcn_raw_atomic_buffer_load:
- case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load: {
+ case Intrinsic::amdgcn_raw_ptr_atomic_buffer_load: {
Info.memVT =
memVTFromLoadIntrReturn(*this, MF.getDataLayout(), CI.getType(),
std::numeric_limits<unsigned>::max());
@@ -8906,7 +8906,7 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_raw_buffer_load:
case Intrinsic::amdgcn_raw_ptr_buffer_load:
case Intrinsic::amdgcn_raw_atomic_buffer_load:
- case Intrinsic::amdgcn_raw_atomic_ptr_buffer_load:
+ case Intrinsic::amdgcn_raw_ptr_atomic_buffer_load:
case Intrinsic::amdgcn_raw_buffer_load_format:
case Intrinsic::amdgcn_raw_ptr_buffer_load_format: {
const bool IsFormat =
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
similarity index 80%
rename from llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll
rename to llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
index 987aa43aa4242..3fa722bfc966f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.ptr.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
@@ -2,8 +2,8 @@
; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=0 | FileCheck %s -check-prefix=CHECK
; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=1 | FileCheck %s -check-prefix=CHECK
-define amdgpu_kernel void @raw_atomic_ptr_buffer_ptr_load_i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_ptr_load_i32:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_ptr_load_i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_ptr_load_i32:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b32 s4, 0
@@ -23,15 +23,15 @@ bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1:
- %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+ %load = call i32 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
%cmp = icmp eq i32 %load, %id
br i1 %cmp, label %bb1, label %bb2
bb2:
ret void
}
-define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_off(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_off:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_off:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b32 s4, 0
@@ -51,14 +51,14 @@ bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1:
- %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+ %load = call i32 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
%cmp = icmp eq i32 %load, %id
br i1 %cmp, label %bb1, label %bb2
bb2:
ret void
}
-define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_soff(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_soff:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_soff:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b32 s4, 0
@@ -78,14 +78,14 @@ bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1:
- %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 4, i32 1)
+ %load = call i32 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 4, i32 1)
%cmp = icmp eq i32 %load, %id
br i1 %cmp, label %bb1, label %bb2
bb2:
ret void
}
-define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i32_dlc(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_load_i32_dlc:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_dlc:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b32 s4, 0
@@ -105,15 +105,15 @@ bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1:
- %load = call i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 4)
+ %load = call i32 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 4)
%cmp = icmp eq i32 %load, %id
br i1 %cmp, label %bb1, label %bb2
bb2:
ret void
}
-define amdgpu_kernel void @raw_nonatomic_ptr_buffer_load_i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_nonatomic_ptr_buffer_load_i32:
+define amdgpu_kernel void @raw_nonptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_nonptr_atomic_buffer_load_i32:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
@@ -141,8 +141,8 @@ bb2:
ret void
}
-define amdgpu_kernel void @raw_atomic_ptr_buffer_load_i64(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_load_i64:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_load_i64:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: v_mov_b32_e32 v1, 0
@@ -164,15 +164,15 @@ bb:
%id.zext = zext i32 %id to i64
br label %bb1
bb1:
- %load = call i64 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i64(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %load = call i64 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i64(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
%cmp = icmp eq i64 %load, %id.zext
br i1 %cmp, label %bb1, label %bb2
bb2:
ret void
}
-define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v2i16(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_load_v2i16:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_load_v2i16:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b32 s4, 0
@@ -192,7 +192,7 @@ bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1:
- %load = call <2 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v2i16(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
+ %load = call <2 x i16> @llvm.amdgcn.raw.ptr.atomic.buffer.load.v2i16(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
%bitcast = bitcast <2 x i16> %load to i32
%cmp = icmp eq i32 %bitcast, %id
br i1 %cmp, label %bb1, label %bb2
@@ -200,8 +200,8 @@ bb2:
ret void
}
-define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v4i16(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_load_v4i16:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_load_v4i16:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b32 s4, 0
@@ -224,7 +224,7 @@ bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1:
- %load = call <4 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i16(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %load = call <4 x i16> @llvm.amdgcn.raw.ptr.atomic.buffer.load.v4i16(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
%shortened = shufflevector <4 x i16> %load, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
%bitcast = bitcast <2 x i16> %shortened to i32
%cmp = icmp eq i32 %bitcast, %id
@@ -233,8 +233,8 @@ bb2:
ret void
}
-define amdgpu_kernel void @raw_atomic_ptr_buffer_load_v4i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_load_v4i32:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_load_v4i32:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b32 s4, 0
@@ -254,7 +254,7 @@ bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1:
- %load = call <4 x i32> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %load = call <4 x i32> @llvm.amdgcn.raw.ptr.atomic.buffer.load.v4i32(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
%extracted = extractelement <4 x i32> %load, i32 3
%cmp = icmp eq i32 %extracted, %id
br i1 %cmp, label %bb1, label %bb2
@@ -262,8 +262,8 @@ bb2:
ret void
}
-define amdgpu_kernel void @raw_atomic_ptr_buffer_load_ptr(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_atomic_ptr_buffer_load_ptr:
+define amdgpu_kernel void @raw_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr) {
+; CHECK-LABEL: raw_ptr_atomic_buffer_load_ptr:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; CHECK-NEXT: s_mov_b32 s4, 0
@@ -285,7 +285,7 @@ bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
bb1:
- %load = call ptr @llvm.amdgcn.raw.atomic.ptr.buffer.load.ptr(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
+ %load = call ptr @llvm.amdgcn.raw.ptr.atomic.buffer.load.ptr(ptr addrspace(8) %ptr, i32 4, i32 0, i32 1)
%elem = load i32, ptr %load
%cmp = icmp eq i32 %elem, %id
br i1 %cmp, label %bb1, label %bb2
@@ -294,11 +294,11 @@ bb2:
}
; Function Attrs: nounwind readonly
-declare i32 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)
-declare i64 @llvm.amdgcn.raw.atomic.ptr.buffer.load.i64(ptr addrspace(8), i32, i32, i32 immarg)
-declare <2 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v2i16(ptr addrspace(8), i32, i32, i32 immarg)
-declare <4 x i16> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i16(ptr addrspace(8), i32, i32, i32 immarg)
-declare <4 x i32> @llvm.amdgcn.raw.atomic.ptr.buffer.load.v4i32(ptr addrspace(8), i32, i32, i32 immarg)
-declare ptr @llvm.amdgcn.raw.atomic.ptr.buffer.load.ptr(ptr addrspace(8), i32, i32, i32 immarg)
+declare i32 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)
+declare i64 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i64(ptr addrspace(8), i32, i32, i32 immarg)
+declare <2 x i16> @llvm.amdgcn.raw.ptr.atomic.buffer.load.v2i16(ptr addrspace(8), i32, i32, i32 immarg)
+declare <4 x i16> @llvm.amdgcn.raw.ptr.atomic.buffer.load.v4i16(ptr addrspace(8), i32, i32, i32 immarg)
+declare <4 x i32> @llvm.amdgcn.raw.ptr.atomic.buffer.load.v4i32(ptr addrspace(8), i32, i32, i32 immarg)
+declare ptr @llvm.amdgcn.raw.ptr.atomic.buffer.load.ptr(ptr addrspace(8), i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.raw.ptr.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.workitem.id.x()
>From 5b578a7dd099048e76f4a58249d37f6e48d27934 Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Fri, 12 Jul 2024 14:11:15 +0200
Subject: [PATCH 7/9] fixup! [AMDGPU] Add
llvm.amdgcn.raw.ptr.atomic.buffer.load
---
.../test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
index 8949d146266a9..57028a0f9b14f 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-memops.ll
@@ -17,10 +17,10 @@ define void @loads(ptr addrspace(8) %buf) {
; CHECK-NEXT: [[VOLATILE:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483648)
; CHECK-NEXT: [[VOLATILE_NONTEMPORAL:%.*]] = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483646), !nontemporal [[META0]]
; CHECK-NEXT: fence syncscope("wavefront") release
-; CHECK-NEXT: [[ATOMIC:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483647)
+; CHECK-NEXT: [[ATOMIC:%.*]] = call float @llvm.amdgcn.raw.ptr.atomic.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 -2147483647)
; CHECK-NEXT: fence syncscope("wavefront") acquire
-; CHECK-NEXT: [[ATOMIC_MONOTONIC:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
-; CHECK-NEXT: [[ATOMIC_ACQUIRE:%.*]] = call float @llvm.amdgcn.raw.atomic.ptr.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
+; CHECK-NEXT: [[ATOMIC_MONOTONIC:%.*]] = call float @llvm.amdgcn.raw.ptr.atomic.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
+; CHECK-NEXT: [[ATOMIC_ACQUIRE:%.*]] = call float @llvm.amdgcn.raw.ptr.atomic.buffer.load.f32(ptr addrspace(8) align 4 [[BUF]], i32 16, i32 0, i32 1)
; CHECK-NEXT: fence acquire
; CHECK-NEXT: ret void
;
>From 4a27a4987b5b1094060fe8817946005c818a3738 Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Mon, 22 Jul 2024 16:24:28 +0200
Subject: [PATCH 8/9] fixup! [AMDGPU] Add raw.atomic.buffer.load intrinsic
---
.../test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
index 44f1089e4554b..03f94d6e853f0 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=0 | FileCheck %s -check-prefix=CHECK
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=1 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=0 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=1 | FileCheck %s -check-prefix=CHECK
define amdgpu_kernel void @raw_atomic_buffer_load_i32(<4 x i32> %addr) {
; CHECK-LABEL: raw_atomic_buffer_load_i32:
>From 500b81212cdaca0ca92d8b01646b634ba9c95205 Mon Sep 17 00:00:00 2001
From: Jessica Del <Jessica.Del at amd.com>
Date: Mon, 22 Jul 2024 16:24:48 +0200
Subject: [PATCH 9/9] fixup! [AMDGPU] Add
llvm.amdgcn.raw.ptr.atomic.buffer.load
---
.../CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
index 3fa722bfc966f..3228335073d07 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=0 | FileCheck %s -check-prefix=CHECK
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -global-isel=1 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=0 | FileCheck %s -check-prefix=CHECK
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1100 -global-isel=1 | FileCheck %s -check-prefix=CHECK
define amdgpu_kernel void @raw_ptr_atomic_buffer_ptr_load_i32(ptr addrspace(8) %ptr) {
; CHECK-LABEL: raw_ptr_atomic_buffer_ptr_load_i32:
More information about the llvm-commits
mailing list