[llvm] [AMDGPU] Fix handling of FP in cs.chain functions (PR #161194)
Robert Imschweiler via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 29 08:59:07 PDT 2025
https://github.com/ro-i updated https://github.com/llvm/llvm-project/pull/161194
>From de66e74ef34bb6461f0c27fb52494b08e1f3cecd Mon Sep 17 00:00:00 2001
From: Robert Imschweiler <robert.imschweiler at amd.com>
Date: Mon, 24 Mar 2025 05:51:23 -0500
Subject: [PATCH 1/2] [AMDGPU] Fix handling of FP in cs.chain functions
In case there is a dynamic alloca / an alloca which is not in the entry
block, cs.chain functions do not set up an FP, but are reported to need
one. This results in a failed assertion in
`SIFrameLowering::emitPrologue()` (Assertion `(!HasFP || FPSaved) &&
"Needed to save FP but didn't save it anywhere"' failed.) This commit
changes `hasFPImpl` so that the need for an SP in a cs.chain function
does not directly imply the need for an FP anymore.
This LLVM defect was identified via the AMD Fuzzing project.
---
llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | 4 +-
.../AMDGPU/amdgpu-cs-chain-fp-nosave.ll | 360 ++++++++++++++++++
2 files changed, 363 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index 7c5d4fc2dacf6..7c2ce2737f7be 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -2166,7 +2166,9 @@ bool SIFrameLowering::hasFPImpl(const MachineFunction &MF) const {
return MFI.getStackSize() != 0;
}
- return frameTriviallyRequiresSP(MFI) || MFI.isFrameAddressTaken() ||
+ return (frameTriviallyRequiresSP(MFI) &&
+ !MF.getInfo<SIMachineFunctionInfo>()->isChainFunction()) ||
+ MFI.isFrameAddressTaken() ||
MF.getSubtarget<GCNSubtarget>().getRegisterInfo()->hasStackRealignment(
MF) ||
mayReserveScratchForCWSR(MF) ||
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
new file mode 100644
index 0000000000000..a2696fe160067
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
@@ -0,0 +1,360 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -o - < %s 2>&1 | FileCheck %s
+
+; These situations are "special" in that they have an alloca not in the entry
+; block, which affects prolog/epilog generation.
+
+declare amdgpu_gfx void @foo()
+
+define amdgpu_cs_chain void @test_alloca() {
+; CHECK-LABEL: test_alloca:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_mov_b32 s0, s32
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_i32 s32, s0, 0x200
+; CHECK-NEXT: scratch_store_b32 off, v0, s0
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 1, align 4, addrspace(5)
+ store i32 0, ptr addrspace(5) %v, align 4
+ ret void
+}
+
+define amdgpu_cs_chain void @test_alloca_var_uniform(i32 inreg %count) {
+; CHECK-LABEL: test_alloca_var_uniform:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_lshl_b32 s0, s0, 2
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_i32 s0, s0, 15
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_and_b32 s0, s0, -16
+; CHECK-NEXT: s_mov_b32 s1, s32
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_lshl_b32 s0, s0, 5
+; CHECK-NEXT: scratch_store_b32 off, v0, s1
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_i32 s32, s1, s0
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 %count, align 4, addrspace(5)
+ store i32 0, ptr addrspace(5) %v, align 4
+ ret void
+}
+
+define amdgpu_cs_chain void @test_alloca_var(i32 %count) {
+; CHECK-LABEL: test_alloca_var:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; CHECK-NEXT: s_mov_b32 s1, exec_lo
+; CHECK-NEXT: s_mov_b32 s0, 0
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, -16, v0
+; CHECK-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_ctz_i32_b32 s2, s1
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s3, v1, s2
+; CHECK-NEXT: s_bitset0_b32 s1, s2
+; CHECK-NEXT: s_max_u32 s0, s0, s3
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_cmp_lg_u32 s1, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB2_1
+; CHECK-NEXT: ; %bb.2:
+; CHECK-NEXT: s_mov_b32 s1, s32
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: v_lshl_add_u32 v1, s0, 5, s1
+; CHECK-NEXT: scratch_store_b32 off, v0, s1
+; CHECK-NEXT: v_readfirstlane_b32 s32, v1
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 %count, align 4, addrspace(5)
+ store i32 0, ptr addrspace(5) %v, align 4
+ ret void
+}
+
+define amdgpu_cs_chain void @test_alloca_and_call() {
+; CHECK-LABEL: test_alloca_and_call:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_getpc_b64 s[0:1]
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_sext_i32_i16 s1, s1
+; CHECK-NEXT: s_add_co_u32 s0, s0, foo at gotpcrel32@lo+12
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_ci_u32 s1, s1, foo at gotpcrel32@hi+24
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_mov_b32 s2, s32
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_i32 s32, s2, 0x200
+; CHECK-NEXT: scratch_store_b32 off, v0, s2
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 1, align 4, addrspace(5)
+ store i32 0, ptr addrspace(5) %v, align 4
+ call amdgpu_gfx void @foo()
+ ret void
+}
+
+define amdgpu_cs_chain void @test_alloca_and_call_var_uniform(i32 inreg %count) {
+; CHECK-LABEL: test_alloca_and_call_var_uniform:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_getpc_b64 s[2:3]
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_sext_i32_i16 s3, s3
+; CHECK-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
+; CHECK-NEXT: s_lshl_b32 s0, s0, 2
+; CHECK-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; CHECK-NEXT: s_add_co_i32 s0, s0, 15
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_and_b32 s0, s0, -16
+; CHECK-NEXT: s_mov_b32 s1, s32
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_lshl_b32 s0, s0, 5
+; CHECK-NEXT: scratch_store_b32 off, v0, s1
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_i32 s32, s1, s0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 %count, align 4, addrspace(5)
+ store i32 0, ptr addrspace(5) %v, align 4
+ call amdgpu_gfx void @foo()
+ ret void
+}
+
+define amdgpu_cs_chain void @test_alloca_and_call_var(i32 %count) {
+; CHECK-LABEL: test_alloca_and_call_var:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; CHECK-NEXT: s_mov_b32 s1, exec_lo
+; CHECK-NEXT: s_mov_b32 s0, 0
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, -16, v0
+; CHECK-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_ctz_i32_b32 s2, s1
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s3, v1, s2
+; CHECK-NEXT: s_bitset0_b32 s1, s2
+; CHECK-NEXT: s_max_u32 s0, s0, s3
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_cmp_lg_u32 s1, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB5_1
+; CHECK-NEXT: ; %bb.2:
+; CHECK-NEXT: s_getpc_b64 s[2:3]
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_sext_i32_i16 s3, s3
+; CHECK-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
+; CHECK-NEXT: s_mov_b32 s1, s32
+; CHECK-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; CHECK-NEXT: v_lshl_add_u32 v1, s0, 5, s1
+; CHECK-NEXT: scratch_store_b32 off, v0, s1
+; CHECK-NEXT: v_readfirstlane_b32 s32, v1
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xf1ff
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 %count, align 4, addrspace(5)
+ store i32 0, ptr addrspace(5) %v, align 4
+ call amdgpu_gfx void @foo()
+ ret void
+}
+
+define amdgpu_cs_chain void @test_call_and_alloca() {
+; CHECK-LABEL: test_call_and_alloca:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_getpc_b64 s[0:1]
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_sext_i32_i16 s1, s1
+; CHECK-NEXT: s_add_co_u32 s0, s0, foo at gotpcrel32@lo+12
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_ci_u32 s1, s1, foo at gotpcrel32@hi+24
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; CHECK-NEXT: s_mov_b32 s4, s32
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_i32 s32, s4, 0x200
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: scratch_store_b32 off, v0, s4
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 1, align 4, addrspace(5)
+ call amdgpu_gfx void @foo()
+ store i32 0, ptr addrspace(5) %v, align 4
+ ret void
+}
+
+define amdgpu_cs_chain void @test_call_and_alloca_var_uniform(i32 inreg %count) {
+; CHECK-LABEL: test_call_and_alloca_var_uniform:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_getpc_b64 s[2:3]
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_sext_i32_i16 s3, s3
+; CHECK-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
+; CHECK-NEXT: s_lshl_b32 s0, s0, 2
+; CHECK-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; CHECK-NEXT: s_add_co_i32 s0, s0, 15
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_and_b32 s0, s0, -16
+; CHECK-NEXT: s_mov_b32 s4, s32
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_lshl_b32 s0, s0, 5
+; CHECK-NEXT: v_mov_b32_e32 v40, 0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_i32 s32, s4, s0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; CHECK-NEXT: scratch_store_b32 off, v40, s4
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 %count, align 4, addrspace(5)
+ call amdgpu_gfx void @foo()
+ store i32 0, ptr addrspace(5) %v, align 4
+ ret void
+}
+
+define amdgpu_cs_chain void @test_call_and_alloca_var(i32 %count) {
+; CHECK-LABEL: test_call_and_alloca_var:
+; CHECK: ; %bb.0: ; %.entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; CHECK-NEXT: v_mov_b32_e32 v40, 0
+; CHECK-NEXT: s_mov_b32 s1, exec_lo
+; CHECK-NEXT: s_mov_b32 s0, 0
+; CHECK-NEXT: s_mov_b32 s32, 16
+; CHECK-NEXT: v_and_b32_e32 v0, -16, v0
+; CHECK-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_ctz_i32_b32 s2, s1
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s3, v0, s2
+; CHECK-NEXT: s_bitset0_b32 s1, s2
+; CHECK-NEXT: s_max_u32 s0, s0, s3
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_cmp_lg_u32 s1, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB8_1
+; CHECK-NEXT: ; %bb.2:
+; CHECK-NEXT: s_getpc_b64 s[2:3]
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_sext_i32_i16 s3, s3
+; CHECK-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
+; CHECK-NEXT: s_mov_b32 s4, s32
+; CHECK-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; CHECK-NEXT: v_lshl_add_u32 v0, s0, 5, s4
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_readfirstlane_b32 s32, v0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xf1ff
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; CHECK-NEXT: scratch_store_b32 off, v40, s4
+; CHECK-NEXT: s_endpgm
+.entry:
+ br label %SW_C
+
+SW_C: ; preds = %.entry
+ %v = alloca i32, i32 %count, align 4, addrspace(5)
+ call amdgpu_gfx void @foo()
+ store i32 0, ptr addrspace(5) %v, align 4
+ ret void
+}
>From 97a5437d790e9db6a12a9546d5e44c61d4d16a64 Mon Sep 17 00:00:00 2001
From: Robert Imschweiler <robert.imschweiler at amd.com>
Date: Mon, 29 Sep 2025 10:58:21 -0500
Subject: [PATCH 2/2] add gfx942 as test target
---
.../AMDGPU/amdgpu-cs-chain-fp-nosave.ll | 698 +++++++++++-------
1 file changed, 440 insertions(+), 258 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
index a2696fe160067..4946f05616a2c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-cs-chain-fp-nosave.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -o - < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -o - < %s 2>&1 | FileCheck %s -check-prefix=GFX12
+; RUN: llc -mtriple=amdgcn -mcpu=gfx942 -o - < %s 2>&1 | FileCheck %s -check-prefix=GFX942
; These situations are "special" in that they have an alloca not in the entry
; block, which affects prolog/epilog generation.
@@ -7,21 +8,31 @@
declare amdgpu_gfx void @foo()
define amdgpu_cs_chain void @test_alloca() {
-; CHECK-LABEL: test_alloca:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_mov_b32 s0, s32
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_i32 s32, s0, 0x200
-; CHECK-NEXT: scratch_store_b32 off, v0, s0
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_alloca:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_mov_b32 s0, s32
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s32, s0, 0x200
+; GFX12-NEXT: scratch_store_b32 off, v0, s0
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_alloca:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: s_mov_b32 s0, s32
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_add_i32 s32, s0, 0x400
+; GFX942-NEXT: scratch_store_dword off, v0, s0
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
@@ -32,27 +43,41 @@ SW_C: ; preds = %.entry
}
define amdgpu_cs_chain void @test_alloca_var_uniform(i32 inreg %count) {
-; CHECK-LABEL: test_alloca_var_uniform:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_lshl_b32 s0, s0, 2
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_i32 s0, s0, 15
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_and_b32 s0, s0, -16
-; CHECK-NEXT: s_mov_b32 s1, s32
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_lshl_b32 s0, s0, 5
-; CHECK-NEXT: scratch_store_b32 off, v0, s1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_i32 s32, s1, s0
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_alloca_var_uniform:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s0, s0, 15
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_and_b32 s0, s0, -16
+; GFX12-NEXT: s_mov_b32 s1, s32
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_lshl_b32 s0, s0, 5
+; GFX12-NEXT: scratch_store_b32 off, v0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s32, s1, s0
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_alloca_var_uniform:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_lshl_b32 s0, s0, 2
+; GFX942-NEXT: s_add_i32 s0, s0, 15
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: s_and_b32 s0, s0, -16
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_lshl_b32 s0, s0, 6
+; GFX942-NEXT: s_mov_b32 s1, s32
+; GFX942-NEXT: s_add_i32 s32, s1, s0
+; GFX942-NEXT: scratch_store_dword off, v0, s1
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
@@ -63,37 +88,61 @@ SW_C: ; preds = %.entry
}
define amdgpu_cs_chain void @test_alloca_var(i32 %count) {
-; CHECK-LABEL: test_alloca_var:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: v_lshl_add_u32 v0, v8, 2, 15
-; CHECK-NEXT: s_mov_b32 s1, exec_lo
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, -16, v0
-; CHECK-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_ctz_i32_b32 s2, s1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT: v_readlane_b32 s3, v1, s2
-; CHECK-NEXT: s_bitset0_b32 s1, s2
-; CHECK-NEXT: s_max_u32 s0, s0, s3
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_cmp_lg_u32 s1, 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB2_1
-; CHECK-NEXT: ; %bb.2:
-; CHECK-NEXT: s_mov_b32 s1, s32
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: v_lshl_add_u32 v1, s0, 5, s1
-; CHECK-NEXT: scratch_store_b32 off, v0, s1
-; CHECK-NEXT: v_readfirstlane_b32 s32, v1
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_alloca_var:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; GFX12-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, -16, v0
+; GFX12-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_ctz_i32_b32 s2, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_readlane_b32 s3, v1, s2
+; GFX12-NEXT: s_bitset0_b32 s1, s2
+; GFX12-NEXT: s_max_u32 s0, s0, s3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12-NEXT: s_cbranch_scc1 .LBB2_1
+; GFX12-NEXT: ; %bb.2:
+; GFX12-NEXT: s_mov_b32 s1, s32
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: v_lshl_add_u32 v1, s0, 5, s1
+; GFX12-NEXT: scratch_store_b32 off, v0, s1
+; GFX12-NEXT: v_readfirstlane_b32 s32, v1
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_alloca_var:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; GFX942-NEXT: v_and_b32_e32 v1, -16, v0
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_mov_b64 s[0:1], exec
+; GFX942-NEXT: s_mov_b32 s2, 0
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: .LBB2_1: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT: s_ff1_i32_b64 s3, s[0:1]
+; GFX942-NEXT: v_readlane_b32 s4, v1, s3
+; GFX942-NEXT: s_bitset0_b64 s[0:1], s3
+; GFX942-NEXT: s_max_u32 s2, s2, s4
+; GFX942-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX942-NEXT: s_cbranch_scc1 .LBB2_1
+; GFX942-NEXT: ; %bb.2:
+; GFX942-NEXT: s_mov_b32 s0, s32
+; GFX942-NEXT: v_mov_b32_e32 v1, s0
+; GFX942-NEXT: v_lshl_add_u32 v1, s2, 6, v1
+; GFX942-NEXT: scratch_store_dword off, v0, s0
+; GFX942-NEXT: v_readfirstlane_b32 s32, v1
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
@@ -104,31 +153,47 @@ SW_C: ; preds = %.entry
}
define amdgpu_cs_chain void @test_alloca_and_call() {
-; CHECK-LABEL: test_alloca_and_call:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_getpc_b64 s[0:1]
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_sext_i32_i16 s1, s1
-; CHECK-NEXT: s_add_co_u32 s0, s0, foo at gotpcrel32@lo+12
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_ci_u32 s1, s1, foo at gotpcrel32@hi+24
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_mov_b32 s2, s32
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_i32 s32, s2, 0x200
-; CHECK-NEXT: scratch_store_b32 off, v0, s2
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_alloca_and_call:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_getpc_b64 s[0:1]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_sext_i32_i16 s1, s1
+; GFX12-NEXT: s_add_co_u32 s0, s0, foo at gotpcrel32@lo+12
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, foo at gotpcrel32@hi+24
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_mov_b32 s2, s32
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s32, s2, 0x200
+; GFX12-NEXT: scratch_store_b32 off, v0, s2
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_alloca_and_call:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_getpc_b64 s[0:1]
+; GFX942-NEXT: s_add_u32 s0, s0, foo at gotpcrel32@lo+4
+; GFX942-NEXT: s_addc_u32 s1, s1, foo at gotpcrel32@hi+12
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: s_mov_b32 s2, s32
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_add_i32 s32, s2, 0x400
+; GFX942-NEXT: scratch_store_dword off, v0, s2
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
@@ -140,36 +205,56 @@ SW_C: ; preds = %.entry
}
define amdgpu_cs_chain void @test_alloca_and_call_var_uniform(i32 inreg %count) {
-; CHECK-LABEL: test_alloca_and_call_var_uniform:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_getpc_b64 s[2:3]
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_sext_i32_i16 s3, s3
-; CHECK-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
-; CHECK-NEXT: s_lshl_b32 s0, s0, 2
-; CHECK-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
-; CHECK-NEXT: s_add_co_i32 s0, s0, 15
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_and_b32 s0, s0, -16
-; CHECK-NEXT: s_mov_b32 s1, s32
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_lshl_b32 s0, s0, 5
-; CHECK-NEXT: scratch_store_b32 off, v0, s1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_i32 s32, s1, s0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_alloca_and_call_var_uniform:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_getpc_b64 s[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_sext_i32_i16 s3, s3
+; GFX12-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
+; GFX12-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX12-NEXT: s_add_co_i32 s0, s0, 15
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_and_b32 s0, s0, -16
+; GFX12-NEXT: s_mov_b32 s1, s32
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_lshl_b32 s0, s0, 5
+; GFX12-NEXT: scratch_store_b32 off, v0, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s32, s1, s0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_alloca_and_call_var_uniform:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_lshl_b32 s0, s0, 2
+; GFX942-NEXT: s_add_i32 s0, s0, 15
+; GFX942-NEXT: s_and_b32 s0, s0, -16
+; GFX942-NEXT: s_lshl_b32 s2, s0, 6
+; GFX942-NEXT: s_getpc_b64 s[0:1]
+; GFX942-NEXT: s_add_u32 s0, s0, foo at gotpcrel32@lo+4
+; GFX942-NEXT: s_addc_u32 s1, s1, foo at gotpcrel32@hi+12
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_mov_b32 s3, s32
+; GFX942-NEXT: s_add_i32 s32, s3, s2
+; GFX942-NEXT: scratch_store_dword off, v0, s3
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
@@ -181,46 +266,76 @@ SW_C: ; preds = %.entry
}
define amdgpu_cs_chain void @test_alloca_and_call_var(i32 %count) {
-; CHECK-LABEL: test_alloca_and_call_var:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: v_lshl_add_u32 v0, v8, 2, 15
-; CHECK-NEXT: s_mov_b32 s1, exec_lo
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, -16, v0
-; CHECK-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_ctz_i32_b32 s2, s1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT: v_readlane_b32 s3, v1, s2
-; CHECK-NEXT: s_bitset0_b32 s1, s2
-; CHECK-NEXT: s_max_u32 s0, s0, s3
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_cmp_lg_u32 s1, 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB5_1
-; CHECK-NEXT: ; %bb.2:
-; CHECK-NEXT: s_getpc_b64 s[2:3]
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_sext_i32_i16 s3, s3
-; CHECK-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
-; CHECK-NEXT: s_mov_b32 s1, s32
-; CHECK-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
-; CHECK-NEXT: v_lshl_add_u32 v1, s0, 5, s1
-; CHECK-NEXT: scratch_store_b32 off, v0, s1
-; CHECK-NEXT: v_readfirstlane_b32 s32, v1
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_wait_alu 0xf1ff
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_alloca_and_call_var:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; GFX12-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_and_b32 v1, -16, v0
+; GFX12-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_ctz_i32_b32 s2, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_readlane_b32 s3, v1, s2
+; GFX12-NEXT: s_bitset0_b32 s1, s2
+; GFX12-NEXT: s_max_u32 s0, s0, s3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX12-NEXT: ; %bb.2:
+; GFX12-NEXT: s_getpc_b64 s[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_sext_i32_i16 s3, s3
+; GFX12-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
+; GFX12-NEXT: s_mov_b32 s1, s32
+; GFX12-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX12-NEXT: v_lshl_add_u32 v1, s0, 5, s1
+; GFX12-NEXT: scratch_store_b32 off, v0, s1
+; GFX12-NEXT: v_readfirstlane_b32 s32, v1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_alloca_and_call_var:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; GFX942-NEXT: v_and_b32_e32 v1, -16, v0
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: s_mov_b64 s[0:1], exec
+; GFX942-NEXT: s_mov_b32 s2, 0
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT: s_ff1_i32_b64 s3, s[0:1]
+; GFX942-NEXT: v_readlane_b32 s4, v1, s3
+; GFX942-NEXT: s_bitset0_b64 s[0:1], s3
+; GFX942-NEXT: s_max_u32 s2, s2, s4
+; GFX942-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX942-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX942-NEXT: ; %bb.2:
+; GFX942-NEXT: s_getpc_b64 s[0:1]
+; GFX942-NEXT: s_add_u32 s0, s0, foo at gotpcrel32@lo+4
+; GFX942-NEXT: s_addc_u32 s1, s1, foo at gotpcrel32@hi+12
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX942-NEXT: s_mov_b32 s3, s32
+; GFX942-NEXT: v_mov_b32_e32 v1, s3
+; GFX942-NEXT: v_lshl_add_u32 v1, s2, 6, v1
+; GFX942-NEXT: scratch_store_dword off, v0, s3
+; GFX942-NEXT: v_readfirstlane_b32 s32, v1
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
@@ -232,30 +347,46 @@ SW_C: ; preds = %.entry
}
define amdgpu_cs_chain void @test_call_and_alloca() {
-; CHECK-LABEL: test_call_and_alloca:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_getpc_b64 s[0:1]
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_sext_i32_i16 s1, s1
-; CHECK-NEXT: s_add_co_u32 s0, s0, foo at gotpcrel32@lo+12
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_ci_u32 s1, s1, foo at gotpcrel32@hi+24
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; CHECK-NEXT: s_mov_b32 s4, s32
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_i32 s32, s4, 0x200
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: scratch_store_b32 off, v0, s4
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_call_and_alloca:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_getpc_b64 s[0:1]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_sext_i32_i16 s1, s1
+; GFX12-NEXT: s_add_co_u32 s0, s0, foo at gotpcrel32@lo+12
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_ci_u32 s1, s1, foo at gotpcrel32@hi+24
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
+; GFX12-NEXT: s_mov_b32 s4, s32
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s32, s4, 0x200
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: scratch_store_b32 off, v0, s4
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_call_and_alloca:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_getpc_b64 s[0:1]
+; GFX942-NEXT: s_add_u32 s0, s0, foo at gotpcrel32@lo+4
+; GFX942-NEXT: s_addc_u32 s1, s1, foo at gotpcrel32@hi+12
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: s_mov_b32 s4, s32
+; GFX942-NEXT: s_add_i32 s32, s4, 0x400
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX942-NEXT: v_mov_b32_e32 v0, 0
+; GFX942-NEXT: scratch_store_dword off, v0, s4
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
@@ -267,36 +398,56 @@ SW_C: ; preds = %.entry
}
define amdgpu_cs_chain void @test_call_and_alloca_var_uniform(i32 inreg %count) {
-; CHECK-LABEL: test_call_and_alloca_var_uniform:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_getpc_b64 s[2:3]
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_sext_i32_i16 s3, s3
-; CHECK-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
-; CHECK-NEXT: s_lshl_b32 s0, s0, 2
-; CHECK-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
-; CHECK-NEXT: s_add_co_i32 s0, s0, 15
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_and_b32 s0, s0, -16
-; CHECK-NEXT: s_mov_b32 s4, s32
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_lshl_b32 s0, s0, 5
-; CHECK-NEXT: v_mov_b32_e32 v40, 0
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_i32 s32, s4, s0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
-; CHECK-NEXT: scratch_store_b32 off, v40, s4
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_call_and_alloca_var_uniform:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_getpc_b64 s[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_sext_i32_i16 s3, s3
+; GFX12-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
+; GFX12-NEXT: s_lshl_b32 s0, s0, 2
+; GFX12-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX12-NEXT: s_add_co_i32 s0, s0, 15
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_and_b32 s0, s0, -16
+; GFX12-NEXT: s_mov_b32 s4, s32
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_lshl_b32 s0, s0, 5
+; GFX12-NEXT: v_mov_b32_e32 v40, 0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_i32 s32, s4, s0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; GFX12-NEXT: scratch_store_b32 off, v40, s4
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_call_and_alloca_var_uniform:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_lshl_b32 s0, s0, 2
+; GFX942-NEXT: s_add_i32 s0, s0, 15
+; GFX942-NEXT: s_and_b32 s0, s0, -16
+; GFX942-NEXT: s_lshl_b32 s2, s0, 6
+; GFX942-NEXT: s_getpc_b64 s[0:1]
+; GFX942-NEXT: s_add_u32 s0, s0, foo at gotpcrel32@lo+4
+; GFX942-NEXT: s_addc_u32 s1, s1, foo at gotpcrel32@hi+12
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: s_mov_b32 s4, s32
+; GFX942-NEXT: v_mov_b32_e32 v40, 0
+; GFX942-NEXT: s_add_i32 s32, s4, s2
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX942-NEXT: scratch_store_dword off, v40, s4
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
@@ -308,47 +459,78 @@ SW_C: ; preds = %.entry
}
define amdgpu_cs_chain void @test_call_and_alloca_var(i32 %count) {
-; CHECK-LABEL: test_call_and_alloca_var:
-; CHECK: ; %bb.0: ; %.entry
-; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
-; CHECK-NEXT: s_wait_expcnt 0x0
-; CHECK-NEXT: s_wait_samplecnt 0x0
-; CHECK-NEXT: s_wait_bvhcnt 0x0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: v_lshl_add_u32 v0, v8, 2, 15
-; CHECK-NEXT: v_mov_b32_e32 v40, 0
-; CHECK-NEXT: s_mov_b32 s1, exec_lo
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_mov_b32 s32, 16
-; CHECK-NEXT: v_and_b32_e32 v0, -16, v0
-; CHECK-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_ctz_i32_b32 s2, s1
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT: v_readlane_b32 s3, v0, s2
-; CHECK-NEXT: s_bitset0_b32 s1, s2
-; CHECK-NEXT: s_max_u32 s0, s0, s3
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_cmp_lg_u32 s1, 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB8_1
-; CHECK-NEXT: ; %bb.2:
-; CHECK-NEXT: s_getpc_b64 s[2:3]
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_sext_i32_i16 s3, s3
-; CHECK-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
-; CHECK-NEXT: s_wait_alu 0xfffe
-; CHECK-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
-; CHECK-NEXT: s_mov_b32 s4, s32
-; CHECK-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
-; CHECK-NEXT: v_lshl_add_u32 v0, s0, 5, s4
-; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; CHECK-NEXT: v_readfirstlane_b32 s32, v0
-; CHECK-NEXT: s_wait_kmcnt 0x0
-; CHECK-NEXT: s_wait_alu 0xf1ff
-; CHECK-NEXT: s_swappc_b64 s[30:31], s[2:3]
-; CHECK-NEXT: scratch_store_b32 off, v40, s4
-; CHECK-NEXT: s_endpgm
+; GFX12-LABEL: test_call_and_alloca_var:
+; GFX12: ; %bb.0: ; %.entry
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; GFX12-NEXT: v_mov_b32_e32 v40, 0
+; GFX12-NEXT: s_mov_b32 s1, exec_lo
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_mov_b32 s32, 16
+; GFX12-NEXT: v_and_b32_e32 v0, -16, v0
+; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_ctz_i32_b32 s2, s1
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_readlane_b32 s3, v0, s2
+; GFX12-NEXT: s_bitset0_b32 s1, s2
+; GFX12-NEXT: s_max_u32 s0, s0, s3
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX12-NEXT: ; %bb.2:
+; GFX12-NEXT: s_getpc_b64 s[2:3]
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_sext_i32_i16 s3, s3
+; GFX12-NEXT: s_add_co_u32 s2, s2, foo at gotpcrel32@lo+12
+; GFX12-NEXT: s_wait_alu 0xfffe
+; GFX12-NEXT: s_add_co_ci_u32 s3, s3, foo at gotpcrel32@hi+24
+; GFX12-NEXT: s_mov_b32 s4, s32
+; GFX12-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
+; GFX12-NEXT: v_lshl_add_u32 v0, s0, 5, s4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_readfirstlane_b32 s32, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: s_wait_alu 0xf1ff
+; GFX12-NEXT: s_swappc_b64 s[30:31], s[2:3]
+; GFX12-NEXT: scratch_store_b32 off, v40, s4
+; GFX12-NEXT: s_endpgm
+;
+; GFX942-LABEL: test_call_and_alloca_var:
+; GFX942: ; %bb.0: ; %.entry
+; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: v_lshl_add_u32 v0, v8, 2, 15
+; GFX942-NEXT: v_and_b32_e32 v0, -16, v0
+; GFX942-NEXT: v_mov_b32_e32 v40, 0
+; GFX942-NEXT: s_mov_b64 s[0:1], exec
+; GFX942-NEXT: s_mov_b32 s2, 0
+; GFX942-NEXT: s_mov_b32 s32, 16
+; GFX942-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT: s_ff1_i32_b64 s3, s[0:1]
+; GFX942-NEXT: v_readlane_b32 s4, v0, s3
+; GFX942-NEXT: s_bitset0_b64 s[0:1], s3
+; GFX942-NEXT: s_max_u32 s2, s2, s4
+; GFX942-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX942-NEXT: s_cbranch_scc1 .LBB8_1
+; GFX942-NEXT: ; %bb.2:
+; GFX942-NEXT: s_getpc_b64 s[0:1]
+; GFX942-NEXT: s_add_u32 s0, s0, foo at gotpcrel32@lo+4
+; GFX942-NEXT: s_addc_u32 s1, s1, foo at gotpcrel32@hi+12
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x0
+; GFX942-NEXT: s_mov_b32 s4, s32
+; GFX942-NEXT: v_mov_b32_e32 v0, s4
+; GFX942-NEXT: v_lshl_add_u32 v0, s2, 6, v0
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_readfirstlane_b32 s32, v0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_swappc_b64 s[30:31], s[0:1]
+; GFX942-NEXT: scratch_store_dword off, v40, s4
+; GFX942-NEXT: s_endpgm
.entry:
br label %SW_C
More information about the llvm-commits
mailing list