[llvm] [WIP][AMDGPU] Improve the handling of `inreg` arguments (PR #133614)
Shilei Tian via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 3 14:59:44 PDT 2025
================
@@ -0,0 +1,107 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 -o - %s | FileCheck %s
+
+; arg3 is in v0 and arg4 is in v1. These should be packed into lanes of a single VGPR and extracted with v_readlane.
+define i32 @callee(<8 x i32> inreg %arg0, <8 x i32> inreg %arg1, <2 x i32> inreg %arg2, i32 inreg %arg3, i32 inreg %arg4) {
+; CHECK-LABEL: callee:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_readlane_b32 s0, v0, 1
+; CHECK-NEXT: v_readlane_b32 s1, v0, 0
+; CHECK-NEXT: s_sub_i32 s0, s1, s0
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %sub = sub i32 %arg3, %arg4
+ ret i32 %sub
+}
+
+define amdgpu_kernel void @kernel(<8 x i32> %arg0, <8 x i32> %arg1, <2 x i32> %arg2, i32 %arg3, i32 %arg4, ptr %p) {
+; CHECK-LABEL: kernel:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_load_dwordx16 s[36:51], s[4:5], 0x0
+; CHECK-NEXT: s_load_dwordx4 s[28:31], s[4:5], 0x40
+; CHECK-NEXT: s_load_dwordx2 s[34:35], s[4:5], 0x50
+; CHECK-NEXT: s_mov_b32 s12, s8
+; CHECK-NEXT: s_add_u32 s8, s4, 0x58
+; CHECK-NEXT: s_mov_b32 s13, s9
+; CHECK-NEXT: s_addc_u32 s9, s5, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, v0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_writelane_b32 v1, s30, 0
+; CHECK-NEXT: s_getpc_b64 s[4:5]
+; CHECK-NEXT: s_add_u32 s4, s4, callee@gotpcrel32@lo+4
+; CHECK-NEXT: s_addc_u32 s5, s5, callee@gotpcrel32@hi+12
+; CHECK-NEXT: v_writelane_b32 v1, s31, 1
+; CHECK-NEXT: s_load_dwordx2 s[30:31], s[4:5], 0x0
+; CHECK-NEXT: s_mov_b32 s14, s10
+; CHECK-NEXT: s_mov_b64 s[10:11], s[6:7]
+; CHECK-NEXT: s_mov_b64 s[4:5], s[0:1]
+; CHECK-NEXT: s_mov_b64 s[6:7], s[2:3]
+; CHECK-NEXT: v_mov_b32_e32 v31, v0
+; CHECK-NEXT: s_mov_b32 s0, s36
+; CHECK-NEXT: s_mov_b32 s1, s37
+; CHECK-NEXT: s_mov_b32 s2, s38
+; CHECK-NEXT: s_mov_b32 s3, s39
+; CHECK-NEXT: s_mov_b32 s16, s40
+; CHECK-NEXT: s_mov_b32 s17, s41
+; CHECK-NEXT: s_mov_b32 s18, s42
+; CHECK-NEXT: s_mov_b32 s19, s43
+; CHECK-NEXT: s_mov_b32 s20, s44
+; CHECK-NEXT: s_mov_b32 s21, s45
+; CHECK-NEXT: s_mov_b32 s22, s46
+; CHECK-NEXT: s_mov_b32 s23, s47
+; CHECK-NEXT: s_mov_b32 s24, s48
+; CHECK-NEXT: s_mov_b32 s25, s49
+; CHECK-NEXT: s_mov_b32 s26, s50
+; CHECK-NEXT: s_mov_b32 s27, s51
+; CHECK-NEXT: v_mov_b32_e32 v0, v1
+; CHECK-NEXT: s_mov_b32 s32, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[30:31]
+; CHECK-NEXT: v_mov_b64_e32 v[2:3], s[34:35]
+; CHECK-NEXT: flat_store_dword v[2:3], v0
+; CHECK-NEXT: s_endpgm
+ %ret = call i32 @callee(<8 x i32> %arg0, <8 x i32> %arg1, <2 x i32> %arg2, i32 %arg3, i32 %arg4)
+ store i32 %ret, ptr %p
+ ret void
+}
+
+define i32 @caller(<8 x i32> inreg %arg0, <8 x i32> inreg %arg1, <2 x i32> inreg %arg2, i32 inreg %arg3, i32 inreg %arg4) {
+; CHECK-LABEL: caller:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s42, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_xor_saveexec_b64 s[40:41], -1
+; CHECK-NEXT: scratch_store_dword off, v1, s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[40:41]
+; CHECK-NEXT: v_readlane_b32 s41, v0, 0
+; CHECK-NEXT: s_add_i32 s32, s32, 16
+; CHECK-NEXT: v_readlane_b32 s40, v0, 1
+; CHECK-NEXT: v_writelane_b32 v0, s41, 0
+; CHECK-NEXT: v_writelane_b32 v1, s30, 0
+; CHECK-NEXT: v_writelane_b32 v0, s40, 1
+; CHECK-NEXT: s_getpc_b64 s[40:41]
+; CHECK-NEXT: s_add_u32 s40, s40, callee@gotpcrel32@lo+4
+; CHECK-NEXT: s_addc_u32 s41, s41, callee@gotpcrel32@hi+12
+; CHECK-NEXT: s_load_dwordx2 s[40:41], s[40:41], 0x0
+; CHECK-NEXT: v_writelane_b32 v1, s31, 1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[40:41]
+; CHECK-NEXT: v_readlane_b32 s31, v1, 1
+; CHECK-NEXT: v_readlane_b32 s30, v1, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: s_xor_saveexec_b64 s[0:1], -1
+; CHECK-NEXT: scratch_load_dword v1, off, s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_mov_b64 exec, s[0:1]
+; CHECK-NEXT: s_mov_b32 s33, s42
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %ret = call i32 @callee(<8 x i32> %arg0, <8 x i32> %arg1, <2 x i32> %arg2, i32 %arg3, i32 %arg4)
+ ret i32 %ret
+}
+
+define i32 @tail_caller(<8 x i32> inreg %arg0, <8 x i32> inreg %arg1, <2 x i32> inreg %arg2, i32 inreg %arg3, i32 inreg %arg4) {
+ %ret = tail call i32 @callee(<8 x i32> %arg0, <8 x i32> %arg1, <2 x i32> %arg2, i32 %arg3, i32 %arg4)
----------------
shiltian wrote:
For some reason, this tail-call version fails with `error: <unknown>:0:0: ran out of registers during register allocation in function 'tail_caller'`.
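If it helps to reproduce this outside the full test, here is a minimal standalone sketch, assuming the same triple and CPU as the RUN line above; the `declare` and the final `ret` are my completions of the truncated diff, not copied from the PR:

```llvm
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx950 -o - %s
; The callee is only declared here; its inreg signature matches @callee in the test.
declare i32 @callee(<8 x i32> inreg, <8 x i32> inreg, <2 x i32> inreg, i32 inreg, i32 inreg)

; Same arguments as @caller, but forwarded as a tail call; this is the
; function that reportedly runs out of registers during register allocation.
define i32 @tail_caller(<8 x i32> inreg %arg0, <8 x i32> inreg %arg1, <2 x i32> inreg %arg2, i32 inreg %arg3, i32 inreg %arg4) {
  %ret = tail call i32 @callee(<8 x i32> %arg0, <8 x i32> %arg1, <2 x i32> %arg2, i32 %arg3, i32 %arg4)
  ret i32 %ret
}
```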
https://github.com/llvm/llvm-project/pull/133614