[llvm] [AMDGPU] Add llvm.amdgcn.dead intrinsic (PR #123190)
Diana Picus via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 11 01:25:27 PST 2025
https://github.com/rovka updated https://github.com/llvm/llvm-project/pull/123190
>From 9a44d961b403dbcd58d75be2d5d3d06c6ae9153d Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Tue, 14 Jan 2025 13:22:36 +0100
Subject: [PATCH 1/6] Precommit test using poison
This test will look better once it uses amdgcn.dead instead of poison.
---
.../AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll | 133 ++++++++++++++++++
1 file changed, 133 insertions(+)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
index 1bdaa4c98127d03..20c3037a80b8e08 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
@@ -1115,4 +1115,137 @@ tail:
unreachable
}
+; Since functions that contain amdgcn.init.whole.wave do not preserve the inactive
+; lanes of any VGPRs, the middle end will explicitly preserve them if needed by adding
+; dummy VGPR arguments. Since only the inactive lanes are important, we need to make
+; it clear to the backend that it's safe to allocate v9 inside shader.
+; FIXME: Using poison is not clear enough.
+define amdgpu_cs_chain void @with_inactive_vgprs(ptr inreg %callee, i32 inreg %exec, i32 inreg %sgpr, i32 %active.vgpr, i32 %inactive.vgpr) {
+; GISEL12-LABEL: with_inactive_vgprs:
+; GISEL12: ; %bb.0: ; %entry
+; GISEL12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL12-NEXT: s_wait_expcnt 0x0
+; GISEL12-NEXT: s_wait_samplecnt 0x0
+; GISEL12-NEXT: s_wait_bvhcnt 0x0
+; GISEL12-NEXT: s_wait_kmcnt 0x0
+; GISEL12-NEXT: s_or_saveexec_b32 s6, -1
+; GISEL12-NEXT: s_mov_b32 s4, s0
+; GISEL12-NEXT: s_mov_b32 s5, s1
+; GISEL12-NEXT: s_mov_b32 s0, s3
+; GISEL12-NEXT: s_wait_alu 0xfffe
+; GISEL12-NEXT: s_and_saveexec_b32 s1, s6
+; GISEL12-NEXT: s_cbranch_execz .LBB6_2
+; GISEL12-NEXT: ; %bb.1: ; %shader
+; GISEL12-NEXT: v_dual_mov_b32 v11, s5 :: v_dual_mov_b32 v10, s4
+; GISEL12-NEXT: flat_load_b32 v12, v[10:11]
+; GISEL12-NEXT: ;;#ASMSTART
+; GISEL12-NEXT: ; use v0-7
+; GISEL12-NEXT: ;;#ASMEND
+; GISEL12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL12-NEXT: v_add_nc_u32_e32 v8, v8, v12
+; GISEL12-NEXT: flat_store_b32 v[10:11], v12
+; GISEL12-NEXT: .LBB6_2: ; %tail.block
+; GISEL12-NEXT: s_wait_alu 0xfffe
+; GISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GISEL12-NEXT: s_mov_b32 exec_lo, s2
+; GISEL12-NEXT: s_wait_alu 0xfffe
+; GISEL12-NEXT: s_setpc_b64 s[4:5]
+;
+; DAGISEL12-LABEL: with_inactive_vgprs:
+; DAGISEL12: ; %bb.0: ; %entry
+; DAGISEL12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL12-NEXT: s_wait_expcnt 0x0
+; DAGISEL12-NEXT: s_wait_samplecnt 0x0
+; DAGISEL12-NEXT: s_wait_bvhcnt 0x0
+; DAGISEL12-NEXT: s_wait_kmcnt 0x0
+; DAGISEL12-NEXT: s_or_saveexec_b32 s6, -1
+; DAGISEL12-NEXT: s_mov_b32 s5, s1
+; DAGISEL12-NEXT: s_mov_b32 s4, s0
+; DAGISEL12-NEXT: s_wait_alu 0xfffe
+; DAGISEL12-NEXT: s_and_saveexec_b32 s0, s6
+; DAGISEL12-NEXT: s_cbranch_execz .LBB6_2
+; DAGISEL12-NEXT: ; %bb.1: ; %shader
+; DAGISEL12-NEXT: v_dual_mov_b32 v11, s5 :: v_dual_mov_b32 v10, s4
+; DAGISEL12-NEXT: flat_load_b32 v12, v[10:11]
+; DAGISEL12-NEXT: ;;#ASMSTART
+; DAGISEL12-NEXT: ; use v0-7
+; DAGISEL12-NEXT: ;;#ASMEND
+; DAGISEL12-NEXT: s_wait_loadcnt_dscnt 0x0
+; DAGISEL12-NEXT: v_add_nc_u32_e32 v8, v8, v12
+; DAGISEL12-NEXT: flat_store_b32 v[10:11], v12
+; DAGISEL12-NEXT: .LBB6_2: ; %tail.block
+; DAGISEL12-NEXT: s_wait_alu 0xfffe
+; DAGISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; DAGISEL12-NEXT: s_mov_b32 s0, s3
+; DAGISEL12-NEXT: s_mov_b32 exec_lo, s2
+; DAGISEL12-NEXT: s_wait_alu 0xfffe
+; DAGISEL12-NEXT: s_setpc_b64 s[4:5]
+;
+; GISEL10-LABEL: with_inactive_vgprs:
+; GISEL10: ; %bb.0: ; %entry
+; GISEL10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL10-NEXT: s_or_saveexec_b32 s6, -1
+; GISEL10-NEXT: s_mov_b32 s4, s0
+; GISEL10-NEXT: s_mov_b32 s5, s1
+; GISEL10-NEXT: s_mov_b32 s0, s3
+; GISEL10-NEXT: s_and_saveexec_b32 s1, s6
+; GISEL10-NEXT: s_cbranch_execz .LBB6_2
+; GISEL10-NEXT: ; %bb.1: ; %shader
+; GISEL10-NEXT: v_mov_b32_e32 v11, s5
+; GISEL10-NEXT: v_mov_b32_e32 v10, s4
+; GISEL10-NEXT: flat_load_dword v12, v[10:11]
+; GISEL10-NEXT: ;;#ASMSTART
+; GISEL10-NEXT: ; use v0-7
+; GISEL10-NEXT: ;;#ASMEND
+; GISEL10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GISEL10-NEXT: v_add_nc_u32_e32 v8, v8, v12
+; GISEL10-NEXT: flat_store_dword v[10:11], v12
+; GISEL10-NEXT: .LBB6_2: ; %tail.block
+; GISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GISEL10-NEXT: s_mov_b32 exec_lo, s2
+; GISEL10-NEXT: s_setpc_b64 s[4:5]
+;
+; DAGISEL10-LABEL: with_inactive_vgprs:
+; DAGISEL10: ; %bb.0: ; %entry
+; DAGISEL10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; DAGISEL10-NEXT: s_or_saveexec_b32 s6, -1
+; DAGISEL10-NEXT: s_mov_b32 s5, s1
+; DAGISEL10-NEXT: s_mov_b32 s4, s0
+; DAGISEL10-NEXT: s_and_saveexec_b32 s0, s6
+; DAGISEL10-NEXT: s_cbranch_execz .LBB6_2
+; DAGISEL10-NEXT: ; %bb.1: ; %shader
+; DAGISEL10-NEXT: v_mov_b32_e32 v11, s5
+; DAGISEL10-NEXT: v_mov_b32_e32 v10, s4
+; DAGISEL10-NEXT: flat_load_dword v12, v[10:11]
+; DAGISEL10-NEXT: ;;#ASMSTART
+; DAGISEL10-NEXT: ; use v0-7
+; DAGISEL10-NEXT: ;;#ASMEND
+; DAGISEL10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; DAGISEL10-NEXT: v_add_nc_u32_e32 v8, v8, v12
+; DAGISEL10-NEXT: flat_store_dword v[10:11], v12
+; DAGISEL10-NEXT: .LBB6_2: ; %tail.block
+; DAGISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; DAGISEL10-NEXT: s_mov_b32 s0, s3
+; DAGISEL10-NEXT: s_mov_b32 exec_lo, s2
+; DAGISEL10-NEXT: s_setpc_b64 s[4:5]
+entry:
+ %1 = call i1 @llvm.amdgcn.init.whole.wave()
+ br i1 %1, label %shader, label %tail.block
+
+shader: ; preds = %entry
+ %use.another.vgpr = load i32, ptr %callee ; something that won't be moved past the inline asm
+ call void asm sideeffect "; use v0-7", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7}"()
+ store i32 %use.another.vgpr, ptr %callee
+ %active.vgpr.new = add i32 %active.vgpr, %use.another.vgpr
+ br label %tail.block
+
+tail.block: ; preds = %entry, %shader
+ %active.vgpr.arg = phi i32 [ %active.vgpr, %entry ], [ %active.vgpr.new, %shader ]
+ %inactive.vgpr.arg = phi i32 [ %inactive.vgpr, %entry ], [ poison, %shader ]
+ %vgprs.0 = insertvalue { i32, i32 } poison, i32 %active.vgpr.arg, 0
+ %vgprs = insertvalue { i32, i32 } %vgprs.0, i32 %inactive.vgpr.arg, 1
+ call void (ptr, i32, i32, { i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain.p0.i32.i32.sl_i32i32(ptr inreg %callee, i32 inreg %exec, i32 inreg %sgpr, { i32, i32} %vgprs, i32 0)
+ unreachable
+}
+
declare amdgpu_gfx <16 x i32> @write_v0_v15(<16 x i32>)
>From b51d4b79067dbf72189c8f3f319bff1739a1dd52 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Wed, 8 Jan 2025 13:02:50 +0100
Subject: [PATCH 2/6] [AMDGPU] Add llvm.amdgcn.dead intrinsic
Shaders that use the llvm.amdgcn.init.whole.wave intrinsic need to
explicitly preserve the inactive lanes of VGPRs of interest by adding
them as dummy arguments. The code usually looks something like this:
```
define amdgpu_cs_chain void @f(active vgpr args..., i32 %inactive.vgpr1, ..., i32 %inactive.vgprN) {
entry:
%c = call i1 @llvm.amdgcn.init.whole.wave()
br i1 %c, label %shader, label %tail
shader:
[...]
tail:
%inactive.vgpr.arg1 = phi i32 [ %inactive.vgpr1, %entry ], [ poison, %shader ]
[...]
; %inactive.vgpr* then get passed into a llvm.amdgcn.cs.chain call
```
Unfortunately, this kind of phi node will get optimized away and the
backend won't be able to figure out that it's ok to use the active lanes
of `%inactive.vgpr*` inside `shader`.
This patch fixes the issue by introducing an llvm.amdgcn.dead intrinsic,
whose result can be used inside the PHI instead of poison. It is
selected to an IMPLICIT_DEF, which the backend can work with.
At the moment, the llvm.amdgcn.dead intrinsic works only on i32 values.
Support for other types can be added later if needed.
---
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 7 +++
.../AMDGPU/AMDGPUInstructionSelector.cpp | 6 +++
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 1 +
llvm/lib/Target/AMDGPU/SIInstructions.td | 6 +++
.../AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll | 47 ++++++++++---------
5 files changed, 46 insertions(+), 21 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index eb7bde69994913c..f8e9760fa220bb3 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3450,4 +3450,11 @@ def int_amdgcn_addrspacecast_nonnull : DefaultAttrsIntrinsic<
[llvm_anyptr_ty], [llvm_anyptr_ty],
[IntrNoMem, IntrSpeculatable]
>;
+
+/// Make it clear to the backend that this value is really dead. For instance,
+/// when used as an input to a phi node, it will make it possible for the
+/// backend to allocate the dead lanes for operations within the corresponding
+/// incoming block.
+def int_amdgcn_dead: DefaultAttrsIntrinsic<[llvm_i32_ty], [],
+ [IntrNoMem, IntrWillReturn, IntrNoCallback]>;
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 3bbbbcf71d8aecd..26d0cb85a984fb0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1190,6 +1190,12 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
case Intrinsic::amdgcn_permlane16_swap:
case Intrinsic::amdgcn_permlane32_swap:
return selectPermlaneSwapIntrin(I, IntrinsicID);
+ case Intrinsic::amdgcn_dead: {
+ I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
+ I.removeOperand(1); // drop intrinsic ID
+ return RBI.constrainGenericRegister(I.getOperand(0).getReg(),
+ AMDGPU::VGPR_32RegClass, *MRI);
+ }
default:
return selectImpl(I, *CoverageInfo);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 2e5f42c3bdc405c..2693ad3894ccabd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4676,6 +4676,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_set_inactive_chain_arg:
case Intrinsic::amdgcn_permlane64:
case Intrinsic::amdgcn_ds_bpermute_fi_b32:
+ case Intrinsic::amdgcn_dead:
return getDefaultMappingAllVGPR(MI);
case Intrinsic::amdgcn_cvt_pkrtz:
if (Subtarget.hasSALUFloatInsts() && isSALUMapping(MI))
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 6e08aff24ec23af..5c32d69054e7387 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -4268,3 +4268,9 @@ def V_ILLEGAL : Enc32, InstSI<(outs), (ins), "v_illegal"> {
let hasSideEffects = 1;
let SubtargetPredicate = isGFX10Plus;
}
+
+// FIXME: Would be nice if we could set the register class for the destination
+// register too.
+def IMP_DEF_FROM_INTRINSIC: Pat<
+ (i32 (int_amdgcn_dead)), (IMPLICIT_DEF)>;
+
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
index 20c3037a80b8e08..20b579a09629897 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
@@ -1118,8 +1118,8 @@ tail:
; Since functions that contain amdgcn.init.whole.wave do not preserve the inactive
; lanes of any VGPRs, the middle end will explicitly preserve them if needed by adding
; dummy VGPR arguments. Since only the inactive lanes are important, we need to make
-; it clear to the backend that it's safe to allocate v9 inside shader.
-; FIXME: Using poison is not clear enough.
+; it clear to the backend that it's safe to allocate v9's active lanes inside
+; shader. This is achieved by using the llvm.amdgcn.dead intrinsic.
define amdgpu_cs_chain void @with_inactive_vgprs(ptr inreg %callee, i32 inreg %exec, i32 inreg %sgpr, i32 %active.vgpr, i32 %inactive.vgpr) {
; GISEL12-LABEL: with_inactive_vgprs:
; GISEL12: ; %bb.0: ; %entry
@@ -1136,14 +1136,15 @@ define amdgpu_cs_chain void @with_inactive_vgprs(ptr inreg %callee, i32 inreg %e
; GISEL12-NEXT: s_and_saveexec_b32 s1, s6
; GISEL12-NEXT: s_cbranch_execz .LBB6_2
; GISEL12-NEXT: ; %bb.1: ; %shader
-; GISEL12-NEXT: v_dual_mov_b32 v11, s5 :: v_dual_mov_b32 v10, s4
-; GISEL12-NEXT: flat_load_b32 v12, v[10:11]
+; GISEL12-NEXT: v_dual_mov_b32 v10, s5 :: v_dual_mov_b32 v9, s4
+; GISEL12-NEXT: flat_load_b32 v11, v[9:10]
; GISEL12-NEXT: ;;#ASMSTART
; GISEL12-NEXT: ; use v0-7
; GISEL12-NEXT: ;;#ASMEND
; GISEL12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GISEL12-NEXT: v_add_nc_u32_e32 v8, v8, v12
-; GISEL12-NEXT: flat_store_b32 v[10:11], v12
+; GISEL12-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GISEL12-NEXT: flat_store_b32 v[9:10], v11
+; GISEL12-NEXT: ; implicit-def: $vgpr9
; GISEL12-NEXT: .LBB6_2: ; %tail.block
; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s1
@@ -1165,14 +1166,15 @@ define amdgpu_cs_chain void @with_inactive_vgprs(ptr inreg %callee, i32 inreg %e
; DAGISEL12-NEXT: s_and_saveexec_b32 s0, s6
; DAGISEL12-NEXT: s_cbranch_execz .LBB6_2
; DAGISEL12-NEXT: ; %bb.1: ; %shader
-; DAGISEL12-NEXT: v_dual_mov_b32 v11, s5 :: v_dual_mov_b32 v10, s4
-; DAGISEL12-NEXT: flat_load_b32 v12, v[10:11]
+; DAGISEL12-NEXT: v_dual_mov_b32 v10, s5 :: v_dual_mov_b32 v9, s4
+; DAGISEL12-NEXT: flat_load_b32 v11, v[9:10]
; DAGISEL12-NEXT: ;;#ASMSTART
; DAGISEL12-NEXT: ; use v0-7
; DAGISEL12-NEXT: ;;#ASMEND
; DAGISEL12-NEXT: s_wait_loadcnt_dscnt 0x0
-; DAGISEL12-NEXT: v_add_nc_u32_e32 v8, v8, v12
-; DAGISEL12-NEXT: flat_store_b32 v[10:11], v12
+; DAGISEL12-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; DAGISEL12-NEXT: flat_store_b32 v[9:10], v11
+; DAGISEL12-NEXT: ; implicit-def: $vgpr9
; DAGISEL12-NEXT: .LBB6_2: ; %tail.block
; DAGISEL12-NEXT: s_wait_alu 0xfffe
; DAGISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s0
@@ -1191,15 +1193,16 @@ define amdgpu_cs_chain void @with_inactive_vgprs(ptr inreg %callee, i32 inreg %e
; GISEL10-NEXT: s_and_saveexec_b32 s1, s6
; GISEL10-NEXT: s_cbranch_execz .LBB6_2
; GISEL10-NEXT: ; %bb.1: ; %shader
-; GISEL10-NEXT: v_mov_b32_e32 v11, s5
-; GISEL10-NEXT: v_mov_b32_e32 v10, s4
-; GISEL10-NEXT: flat_load_dword v12, v[10:11]
+; GISEL10-NEXT: v_mov_b32_e32 v10, s5
+; GISEL10-NEXT: v_mov_b32_e32 v9, s4
+; GISEL10-NEXT: flat_load_dword v11, v[9:10]
; GISEL10-NEXT: ;;#ASMSTART
; GISEL10-NEXT: ; use v0-7
; GISEL10-NEXT: ;;#ASMEND
; GISEL10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GISEL10-NEXT: v_add_nc_u32_e32 v8, v8, v12
-; GISEL10-NEXT: flat_store_dword v[10:11], v12
+; GISEL10-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; GISEL10-NEXT: flat_store_dword v[9:10], v11
+; GISEL10-NEXT: ; implicit-def: $vgpr9
; GISEL10-NEXT: .LBB6_2: ; %tail.block
; GISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GISEL10-NEXT: s_mov_b32 exec_lo, s2
@@ -1214,21 +1217,23 @@ define amdgpu_cs_chain void @with_inactive_vgprs(ptr inreg %callee, i32 inreg %e
; DAGISEL10-NEXT: s_and_saveexec_b32 s0, s6
; DAGISEL10-NEXT: s_cbranch_execz .LBB6_2
; DAGISEL10-NEXT: ; %bb.1: ; %shader
-; DAGISEL10-NEXT: v_mov_b32_e32 v11, s5
-; DAGISEL10-NEXT: v_mov_b32_e32 v10, s4
-; DAGISEL10-NEXT: flat_load_dword v12, v[10:11]
+; DAGISEL10-NEXT: v_mov_b32_e32 v10, s5
+; DAGISEL10-NEXT: v_mov_b32_e32 v9, s4
+; DAGISEL10-NEXT: flat_load_dword v11, v[9:10]
; DAGISEL10-NEXT: ;;#ASMSTART
; DAGISEL10-NEXT: ; use v0-7
; DAGISEL10-NEXT: ;;#ASMEND
; DAGISEL10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; DAGISEL10-NEXT: v_add_nc_u32_e32 v8, v8, v12
-; DAGISEL10-NEXT: flat_store_dword v[10:11], v12
+; DAGISEL10-NEXT: v_add_nc_u32_e32 v8, v8, v11
+; DAGISEL10-NEXT: flat_store_dword v[9:10], v11
+; DAGISEL10-NEXT: ; implicit-def: $vgpr9
; DAGISEL10-NEXT: .LBB6_2: ; %tail.block
; DAGISEL10-NEXT: s_or_b32 exec_lo, exec_lo, s0
; DAGISEL10-NEXT: s_mov_b32 s0, s3
; DAGISEL10-NEXT: s_mov_b32 exec_lo, s2
; DAGISEL10-NEXT: s_setpc_b64 s[4:5]
entry:
+ %0 = call i32 @llvm.amdgcn.dead()
%1 = call i1 @llvm.amdgcn.init.whole.wave()
br i1 %1, label %shader, label %tail.block
@@ -1241,7 +1246,7 @@ shader: ; preds = %entry
 tail.block: ; preds = %entry, %shader
%active.vgpr.arg = phi i32 [ %active.vgpr, %entry ], [ %active.vgpr.new, %shader ]
- %inactive.vgpr.arg = phi i32 [ %inactive.vgpr, %entry ], [ poison, %shader ]
+ %inactive.vgpr.arg = phi i32 [ %inactive.vgpr, %entry ], [ %0, %shader ]
%vgprs.0 = insertvalue { i32, i32 } poison, i32 %active.vgpr.arg, 0
%vgprs = insertvalue { i32, i32 } %vgprs.0, i32 %inactive.vgpr.arg, 1
call void (ptr, i32, i32, { i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain.p0.i32.i32.sl_i32i32(ptr inreg %callee, i32 inreg %exec, i32 inreg %sgpr, { i32, i32} %vgprs, i32 0)
>From aac0defc0e1de1072353c5891670cc88e503a871 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Thu, 16 Jan 2025 14:20:21 +0100
Subject: [PATCH 3/6] Fixup - mark as source of divergence
---
llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td | 2 ++
1 file changed, 2 insertions(+)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
index 10175557fadc71c..3b62dcf3c92cdfc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSearchableTables.td
@@ -362,6 +362,8 @@ def : SourceOfDivergence<int_amdgcn_inverse_ballot>;
foreach intr = AMDGPUImageDimAtomicIntrinsics in
def : SourceOfDivergence<intr>;
+def : SourceOfDivergence<int_amdgcn_dead>;
+
class AlwaysUniform<Intrinsic intr> {
Intrinsic Intr = intr;
}
>From 3ac18d3c931e6be1127f99c99ec09eeba6ce824f Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Thu, 16 Jan 2025 15:06:47 +0100
Subject: [PATCH 4/6] Fixup - add test (derp)
---
.../Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
index aa5208560817fdc..02c16715166e420 100644
--- a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/intrinsics.ll
@@ -520,7 +520,12 @@ define amdgpu_kernel void @v_permlane32_swap(ptr addrspace(1) %out, i32 %src0, i
ret void
}
-
+; CHECK: DIVERGENT: %v = call i32 @llvm.amdgcn.dead()
+define amdgpu_cs_chain void @dead(ptr addrspace(1) %out) {
+ %v = call i32 @llvm.amdgcn.dead()
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
declare i32 @llvm.amdgcn.ds.swizzle(i32, i32) #1
declare i32 @llvm.amdgcn.permlane16.i32(i32, i32, i32, i32, i1, i1) #1
@@ -558,5 +563,7 @@ declare <4 x i16> @llvm.amdgcn.global.load.tr.b128.v4i16(ptr addrspace(1))
declare <4 x half> @llvm.amdgcn.global.load.tr.b128.v4f16(ptr addrspace(1))
declare <4 x bfloat> @llvm.amdgcn.global.load.tr.b128.v4bf16(ptr addrspace(1))
+declare i32 @llvm.amdgcn.dead()
+
attributes #0 = { nounwind convergent }
attributes #1 = { nounwind readnone convergent }
>From 52aebbf48f4d2b21e15ab621ef29aa3a50cf4d17 Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Fri, 17 Jan 2025 14:00:49 +0100
Subject: [PATCH 5/6] More testing
---
llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll | 66 +++++++++++++++++++
.../AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll | 8 +--
2 files changed, 70 insertions(+), 4 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll
new file mode 100644
index 000000000000000..291f7fa23afd3cc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=ASM-DAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefix=ASM-GISEL %s
+
+; Test that we can use v0 for temporaries in the if.then block.
+define i32 @dead(i1 %cond, i32 %x, ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr2) #0 {
+; ASM-DAG-LABEL: dead:
+; ASM-DAG: ; %bb.0: ; %entry
+; ASM-DAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; ASM-DAG-NEXT: s_wait_expcnt 0x0
+; ASM-DAG-NEXT: s_wait_samplecnt 0x0
+; ASM-DAG-NEXT: s_wait_bvhcnt 0x0
+; ASM-DAG-NEXT: s_wait_kmcnt 0x0
+; ASM-DAG-NEXT: v_mov_b32_e32 v4, v0
+; ASM-DAG-NEXT: v_mov_b32_e32 v0, v1
+; ASM-DAG-NEXT: s_mov_b32 s0, exec_lo
+; ASM-DAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; ASM-DAG-NEXT: v_and_b32_e32 v1, 1, v4
+; ASM-DAG-NEXT: v_cmpx_eq_u32_e32 1, v1
+; ASM-DAG-NEXT: s_cbranch_execz .LBB0_2
+; ASM-DAG-NEXT: ; %bb.1: ; %if.then
+; ASM-DAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; ASM-DAG-NEXT: global_store_b32 v[2:3], v0, off
+; ASM-DAG-NEXT: ; implicit-def: $vgpr0
+; ASM-DAG-NEXT: .LBB0_2: ; %if.end
+; ASM-DAG-NEXT: s_wait_alu 0xfffe
+; ASM-DAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; ASM-DAG-NEXT: s_wait_alu 0xfffe
+; ASM-DAG-NEXT: s_setpc_b64 s[30:31]
+;
+; ASM-GISEL-LABEL: dead:
+; ASM-GISEL: ; %bb.0: ; %entry
+; ASM-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; ASM-GISEL-NEXT: s_wait_expcnt 0x0
+; ASM-GISEL-NEXT: s_wait_samplecnt 0x0
+; ASM-GISEL-NEXT: s_wait_bvhcnt 0x0
+; ASM-GISEL-NEXT: s_wait_kmcnt 0x0
+; ASM-GISEL-NEXT: v_mov_b32_e32 v4, v0
+; ASM-GISEL-NEXT: v_mov_b32_e32 v0, v1
+; ASM-GISEL-NEXT: s_mov_b32 s0, exec_lo
+; ASM-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; ASM-GISEL-NEXT: v_and_b32_e32 v1, 1, v4
+; ASM-GISEL-NEXT: v_cmpx_ne_u32_e32 0, v1
+; ASM-GISEL-NEXT: s_cbranch_execz .LBB0_2
+; ASM-GISEL-NEXT: ; %bb.1: ; %if.then
+; ASM-GISEL-NEXT: v_add_nc_u32_e32 v0, 1, v0
+; ASM-GISEL-NEXT: global_store_b32 v[2:3], v0, off
+; ASM-GISEL-NEXT: ; implicit-def: $vgpr0
+; ASM-GISEL-NEXT: .LBB0_2: ; %if.end
+; ASM-GISEL-NEXT: s_wait_alu 0xfffe
+; ASM-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; ASM-GISEL-NEXT: s_wait_alu 0xfffe
+; ASM-GISEL-NEXT: s_setpc_b64 s[30:31]
+entry:
+ %dead = call i32 @llvm.amdgcn.dead()
+ br i1 %cond, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ %temp = add i32 %x, 1
+ store i32 %temp, ptr addrspace(1) %ptr1
+ br label %if.end
+
+if.end:
+ %res = phi i32 [ %x, %entry ], [ %dead, %if.then ]
+ ret i32 %res
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
index 20b579a09629897..eedc4680742ee78 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
@@ -1233,9 +1233,9 @@ define amdgpu_cs_chain void @with_inactive_vgprs(ptr inreg %callee, i32 inreg %e
; DAGISEL10-NEXT: s_mov_b32 exec_lo, s2
; DAGISEL10-NEXT: s_setpc_b64 s[4:5]
entry:
- %0 = call i32 @llvm.amdgcn.dead()
- %1 = call i1 @llvm.amdgcn.init.whole.wave()
- br i1 %1, label %shader, label %tail.block
+ %imp.def = call i32 @llvm.amdgcn.dead()
+ %initial.exec = call i1 @llvm.amdgcn.init.whole.wave()
+ br i1 %initial.exec, label %shader, label %tail.block
shader: ; preds = %entry
 %use.another.vgpr = load i32, ptr %callee ; something that won't be moved past the inline asm
@@ -1246,7 +1246,7 @@ shader: ; preds = %entry
 tail.block: ; preds = %entry, %shader
%active.vgpr.arg = phi i32 [ %active.vgpr, %entry ], [ %active.vgpr.new, %shader ]
- %inactive.vgpr.arg = phi i32 [ %inactive.vgpr, %entry ], [ %0, %shader ]
+ %inactive.vgpr.arg = phi i32 [ %inactive.vgpr, %entry ], [ %imp.def, %shader ]
%vgprs.0 = insertvalue { i32, i32 } poison, i32 %active.vgpr.arg, 0
%vgprs = insertvalue { i32, i32 } %vgprs.0, i32 %inactive.vgpr.arg, 1
call void (ptr, i32, i32, { i32, i32 }, i32, ...) @llvm.amdgcn.cs.chain.p0.i32.i32.sl_i32i32(ptr inreg %callee, i32 inreg %exec, i32 inreg %sgpr, { i32, i32} %vgprs, i32 0)
>From 5abc0891adfde146654ae08cccf05b17b611714b Mon Sep 17 00:00:00 2001
From: Diana Picus <diana-magda.picus at amd.com>
Date: Tue, 11 Feb 2025 10:24:56 +0100
Subject: [PATCH 6/6] Fixup tests after rebase
---
llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll | 3 +--
llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll | 1 -
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll
index 291f7fa23afd3cc..993eda9e6105196 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.dead.ll
@@ -25,7 +25,6 @@ define i32 @dead(i1 %cond, i32 %x, ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr
; ASM-DAG-NEXT: .LBB0_2: ; %if.end
; ASM-DAG-NEXT: s_wait_alu 0xfffe
; ASM-DAG-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; ASM-DAG-NEXT: s_wait_alu 0xfffe
; ASM-DAG-NEXT: s_setpc_b64 s[30:31]
;
; ASM-GISEL-LABEL: dead:
@@ -49,8 +48,8 @@ define i32 @dead(i1 %cond, i32 %x, ptr addrspace(1) %ptr1, ptr addrspace(1) %ptr
; ASM-GISEL-NEXT: .LBB0_2: ; %if.end
; ASM-GISEL-NEXT: s_wait_alu 0xfffe
; ASM-GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s0
-; ASM-GISEL-NEXT: s_wait_alu 0xfffe
; ASM-GISEL-NEXT: s_setpc_b64 s[30:31]
+; ASM-DAG: ; %bb.0: ; %entry
entry:
%dead = call i32 @llvm.amdgcn.dead()
br i1 %cond, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
index eedc4680742ee78..110192ecefe55f5 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.init.whole.wave-w32.ll
@@ -1149,7 +1149,6 @@ define amdgpu_cs_chain void @with_inactive_vgprs(ptr inreg %callee, i32 inreg %e
; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_or_b32 exec_lo, exec_lo, s1
; GISEL12-NEXT: s_mov_b32 exec_lo, s2
-; GISEL12-NEXT: s_wait_alu 0xfffe
; GISEL12-NEXT: s_setpc_b64 s[4:5]
;
; DAGISEL12-LABEL: with_inactive_vgprs: