[llvm] [AMDGPU] Use 32-bit SGPR to save/restore of SCC (PR #68367)
Sirish Pande via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 12 07:43:52 PDT 2023
================
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -march=amdgcn -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX906 %s
+declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32 immarg) #0
+declare void @llvm.amdgcn.raw.buffer.store.f32(float, <4 x i32>, i32, i32, i32 immarg) #1
+
+; Check that the compiler doesn't crash with an "undefined physical register" error:
+; bb.0 sets SCC bit in s_cmp_eq_u32 s0, 1
+; bb.1 clobbers it
+; bb.2 uses the value from bb.0
+; Preserve SCC across bb.1 with s_cselect_b32 s5, -1, 0 -> s_and_b32 s0, s5, exec_lo
+; Otherwise, we will see the following error:
+;*** Bad machine code: Using an undefined physical register ***
+;- function: foo
+;- basic block: %bb.3 (0x53198c0)
+;- instruction: %33.sub1:sgpr_128 = S_CSELECT_B32 1072693248, 0, implicit $scc
+;- operand 3: implicit $scc
+
+
+define amdgpu_kernel void @foo(i1 %cmp1) {
+; GFX906-LABEL: foo:
+; GFX906: ; %bb.0: ; %entry
+; GFX906-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX906-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX906-NEXT: s_mov_b32 s10, -1
+; GFX906-NEXT: s_mov_b32 s11, 0xe00000
+; GFX906-NEXT: s_add_u32 s8, s8, s3
+; GFX906-NEXT: s_addc_u32 s9, s9, 0
+; GFX906-NEXT: buffer_load_dword v3, off, s[8:11], 0
+; GFX906-NEXT: buffer_load_dword v4, off, s[8:11], 0 offset:4
+; GFX906-NEXT: buffer_load_dword v5, off, s[8:11], 0 offset:8
+; GFX906-NEXT: buffer_load_dword v6, off, s[8:11], 0 offset:12
+; GFX906-NEXT: s_load_dword s4, s[0:1], 0x24
+; GFX906-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x1c
+; GFX906-NEXT: s_waitcnt lgkmcnt(0)
+; GFX906-NEXT: s_bitcmp1_b32 s4, 0
+; GFX906-NEXT: s_mul_i32 s0, s2, s3
+; GFX906-NEXT: v_mul_u32_u24_e32 v1, s3, v1
+; GFX906-NEXT: v_mad_u32_u24 v0, s0, v0, v1
+; GFX906-NEXT: v_add_lshl_u32 v2, v0, v2, 4
+; GFX906-NEXT: v_mov_b32_e32 v0, 0
+; GFX906-NEXT: s_mov_b32 s4, 0
+; GFX906-NEXT: v_mov_b32_e32 v1, v0
+; GFX906-NEXT: s_cselect_b32 s5, -1, 0
+; GFX906-NEXT: s_mov_b64 s[2:3], exec
+; GFX906-NEXT: ds_write_b64 v2, v[0:1]
+; GFX906-NEXT: .LBB0_1: ; =>This Inner Loop Header: Depth=1
+; GFX906-NEXT: s_waitcnt vmcnt(3)
+; GFX906-NEXT: v_readfirstlane_b32 s0, v3
+; GFX906-NEXT: s_waitcnt vmcnt(2)
+; GFX906-NEXT: v_readfirstlane_b32 s1, v4
+; GFX906-NEXT: v_cmp_eq_u64_e32 vcc, s[0:1], v[3:4]
+; GFX906-NEXT: s_waitcnt vmcnt(1)
+; GFX906-NEXT: v_readfirstlane_b32 s0, v5
+; GFX906-NEXT: s_waitcnt vmcnt(0)
+; GFX906-NEXT: v_readfirstlane_b32 s1, v6
+; GFX906-NEXT: v_cmp_eq_u64_e64 s[0:1], s[0:1], v[5:6]
+; GFX906-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
+; GFX906-NEXT: s_and_saveexec_b64 s[0:1], s[0:1]
+; GFX906-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6
+; GFX906-NEXT: s_xor_b64 exec, exec, s[0:1]
+; GFX906-NEXT: s_cbranch_execnz .LBB0_1
+; GFX906-NEXT: ; %bb.2:
+; GFX906-NEXT: s_and_b32 s0, s5, exec_lo
+; GFX906-NEXT: s_mov_b64 exec, s[2:3]
+; GFX906-NEXT: s_cselect_b32 s5, 0x3ff00000, 0
+; GFX906-NEXT: v_cvt_f32_f64_e32 v0, s[4:5]
+; GFX906-NEXT: s_mov_b32 s5, s4
+; GFX906-NEXT: s_mov_b32 s6, s4
+; GFX906-NEXT: s_mov_b32 s7, s4
+; GFX906-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX906-NEXT: s_endpgm
+entry:
+ %wbr = alloca <4 x i32>, align 16, addrspace(5)
+ store ptr null, ptr addrspace(5) %wbr, align 16
+ %wbr_1 = load <4 x i32>, ptr addrspace(5) null, align 16
+ %call1 = tail call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %wbr_1, i32 0, i32 0, i32 0)
+ %0 = fpext float %call1 to double
----------------
srpande wrote:
Actually, the intrinsic does matter. That's the one that gets converted into a waterfall loop.
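
For context, the sequence the new CHECK lines exercise is roughly the following (a sketch pieced together from the generated code quoted above, not copied from the patch itself; register names match the test output):

    s_cselect_b32 s5, -1, 0      ; save: materialize SCC into a 32-bit SGPR
                                 ; (-1 if SCC was set, 0 if it was clear)
    ; ... waterfall loop over the buffer descriptor; its s_and_b64 /
    ; s_and_saveexec_b64 / s_xor_b64 instructions clobber SCC ...
    s_and_b32 s0, s5, exec_lo    ; restore: SOP2 ALU ops write SCC = (result != 0),
                                 ; so this re-derives SCC from the saved value

The later s_cselect_b32 s5, 0x3ff00000, 0 in bb.2 then reads the restored SCC rather than the value clobbered by the loop.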
https://github.com/llvm/llvm-project/pull/68367