[llvm] c657a6f - [AMDGPU] Fix selection of s_load_b96 on GFX11 (#108029)
Author: Jay Foad
Date: 2024-09-12T13:41:40+01:00
New Revision: c657a6f6aa7e802d65aba84b8b3fe2eb5e2459d8
URL: https://github.com/llvm/llvm-project/commit/c657a6f6aa7e802d65aba84b8b3fe2eb5e2459d8
DIFF: https://github.com/llvm/llvm-project/commit/c657a6f6aa7e802d65aba84b8b3fe2eb5e2459d8.diff
LOG: [AMDGPU] Fix selection of s_load_b96 on GFX11 (#108029)
Fix a bug that caused s_load_b96 to be selected on GFX11, even though
that instruction only exists on GFX12.
The root cause was a mismatch between legalization and selection. The
condition used in legalization (SITargetLowering::LowerLOAD) to check
that a load was uniform was "!Op->isDivergent()". The condition used in
selection (AMDGPUDAGToDAGISel::isUniformLoad()) to detect a non-uniform
load was "N->isDivergent() && !AMDGPUInstrInfo::isUniformMMO(MMO)". The
two can disagree when IR uniformity analysis has more information than
SDAG's built-in divergence analysis. In the test case that happens
because IR uniformity analysis reports everything as uniform when
isSingleLaneExecution() returns true, e.g. when the specified maximum
flat workgroup size is 1, but SDAG does not have this optimization.
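Spelled out as a sketch (UniformInLegalization and UniformInSelection
are illustrative names, not variables in the code; the expressions are
the two conditions quoted above):

  // Legalization (SITargetLowering::LowerLOAD): a load was treated as
  // uniform only if SDAG's own divergence analysis said so.
  bool UniformInLegalization = !Op->isDivergent();

  // Selection (AMDGPUDAGToDAGISel::isUniformLoad): negating the
  // non-uniform check quoted above, a load was also treated as uniform
  // if its MachineMemOperand was marked uniform by IR uniformity
  // analysis.
  bool UniformInSelection =
      !N->isDivergent() || AMDGPUInstrInfo::isUniformMMO(MMO);

So a load that SDAG considers divergent but whose MMO is marked uniform
fell through legalization's uniform-load handling (no widening or
splitting of the <3 x i32> load on GFX11), yet selection still turned
it into a scalar load, yielding s_load_b96 on a subtarget that lacks it.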
The immediate fix is to use the same condition to detect uniform loads
in legalization and selection. In the future, SDAG should learn about
isSingleLaneExecution(); then it could probably stop relying on IR
metadata to detect uniform loads.
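Concretely, after this patch the check in LowerLOAD (the last hunk of
the diff below) uses the same uniformity condition that selection does:

  // Legalization now treats a load as uniform if it is either not
  // divergent per SDAG or known uniform from its MachineMemOperand,
  // matching AMDGPUDAGToDAGISel::isUniformLoad().
  if ((!Op->isDivergent() || AMDGPUInstrInfo::isUniformMMO(MMO)) &&
      Alignment >= Align(4) && NumElements < 32) {
    // Types a scalar load can handle are left as-is; others are
    // widened or split (see the hunk below).
  }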
Added:
llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
Modified:
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 62e22c15af168e..4a861f0c03a0c5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10262,6 +10262,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   LoadSDNode *Load = cast<LoadSDNode>(Op);
   ISD::LoadExtType ExtType = Load->getExtensionType();
   EVT MemVT = Load->getMemoryVT();
+  MachineMemOperand *MMO = Load->getMemOperand();
 
   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
@@ -10272,7 +10273,6 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
     SDValue Chain = Load->getChain();
     SDValue BasePtr = Load->getBasePtr();
-    MachineMemOperand *MMO = Load->getMemOperand();
 
     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
@@ -10328,25 +10328,12 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
 
   unsigned NumElements = MemVT.getVectorNumElements();
 
-  if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
-      AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
-    if (!Op->isDivergent() && Alignment >= Align(4) && NumElements < 32) {
-      if (MemVT.isPow2VectorType() ||
-          (Subtarget->hasScalarDwordx3Loads() && NumElements == 3))
-        return SDValue();
-      return WidenOrSplitVectorLoad(Op, DAG);
-    }
-    // Non-uniform loads will be selected to MUBUF instructions, so they
-    // have the same legalization requirements as global and private
-    // loads.
-    //
-  }
-
   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
-      AS == AMDGPUAS::GLOBAL_ADDRESS) {
-    if (Subtarget->getScalarizeGlobalBehavior() && !Op->isDivergent() &&
-        Load->isSimple() && isMemOpHasNoClobberedMemOperand(Load) &&
+      (AS == AMDGPUAS::GLOBAL_ADDRESS &&
+       Subtarget->getScalarizeGlobalBehavior() && Load->isSimple() &&
+       isMemOpHasNoClobberedMemOperand(Load))) {
+    if ((!Op->isDivergent() || AMDGPUInstrInfo::isUniformMMO(MMO)) &&
         Alignment >= Align(4) && NumElements < 32) {
       if (MemVT.isPow2VectorType() ||
           (Subtarget->hasScalarDwordx3Loads() && NumElements == 3))
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
new file mode 100644
index 00000000000000..668ebe3f953b32
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-always-uniform.ll
@@ -0,0 +1,60 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s -check-prefix=GFX11
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s -check-prefix=GFX12
+
+define amdgpu_cs void @test_uniform_load_b96(ptr addrspace(1) %ptr, i32 %arg) "amdgpu-flat-work-group-size"="1,1" {
+; GFX11-LABEL: test_uniform_load_b96:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: v_mov_b32_e32 v3, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_lshlrev_b64 v[2:3], 2, v[2:3]
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_readfirstlane_b32 s0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_readfirstlane_b32 s1, v3
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x8
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v2, s3
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_or3_b32 v2, s2, v2, s0
+; GFX11-NEXT: global_store_b32 v[0:1], v2, off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: test_uniform_load_b96:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: v_mov_b32_e32 v3, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_lshlrev_b64_e32 v[2:3], 2, v[2:3]
+; GFX12-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: v_readfirstlane_b32 s0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_readfirstlane_b32 s1, v3
+; GFX12-NEXT: s_load_b96 s[0:2], s[0:1], 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_or3_b32 v2, v2, s1, s2
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_nop 0
+; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX12-NEXT: s_endpgm
+bb:
+  %i = zext i32 %arg to i64
+  %i1 = getelementptr i32, ptr addrspace(1) %ptr, i64 %i
+  %i2 = load <3 x i32>, ptr addrspace(1) %i1, align 4
+  %i3 = extractelement <3 x i32> %i2, i32 0
+  %i4 = extractelement <3 x i32> %i2, i32 1
+  %i5 = extractelement <3 x i32> %i2, i32 2
+  %i6 = or i32 %i3, %i4
+  %i7 = or i32 %i5, %i6
+  store i32 %i7, ptr addrspace(1) %ptr, align 4
+  ret void
+}