[llvm] 1a8c571 - [AMDGPU] We would need FP if there is a call and caller-save VGPR spills

via llvm-commits llvm-commits@lists.llvm.org
Tue Jul 27 22:43:11 PDT 2021


Author: RamNalamothu
Date: 2021-07-28T11:12:55+05:30
New Revision: 1a8c57179a129300c2b0c20032286ded8c7af77c

URL: https://github.com/llvm/llvm-project/commit/1a8c57179a129300c2b0c20032286ded8c7af77c
DIFF: https://github.com/llvm/llvm-project/commit/1a8c57179a129300c2b0c20032286ded8c7af77c.diff

LOG: [AMDGPU] We would need FP if there is a call and caller-save VGPR spills

Since https://reviews.llvm.org/D98319, determineCalleeSavesSGPR() must
also consider caller-save VGPR spills when anticipating whether we will
require an FP.

Fixes: SWDEV-295978

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D106758

Added: 
    llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll

Modified: 
    llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
    llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll

Removed: 
    llvm/test/CodeGen/AMDGPU/need-fp-from-csr-vgpr-spill.ll


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index c9883d38e08c7..fe0132bb8e802 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -1301,10 +1301,13 @@ void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
   // If clearing VGPRs changed the mask, we will have some CSR VGPR spills.
   const bool HaveAnyCSRVGPR = SavedRegs != AllSavedRegs;
 
-  // We have to anticipate introducing CSR VGPR spills if we don't have any
-  // stack objects already, since we require an FP if there is a call and stack.
+  // Even if we do not have any stack objects yet, we have to anticipate
+  // introducing CSR VGPR spills, or a spill of the caller-save VGPR
+  // reserved for SGPR spills (which now always gets a stack entry),
+  // since we require an FP if there is a call and a stack.
   MachineFrameInfo &FrameInfo = MF.getFrameInfo();
-  const bool WillHaveFP = FrameInfo.hasCalls() && HaveAnyCSRVGPR;
+  const bool WillHaveFP =
+      FrameInfo.hasCalls() && (HaveAnyCSRVGPR || MFI->VGPRReservedForSGPRSpill);
 
   // FP will be specially managed like SP.
   if (WillHaveFP || hasFP(MF))
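
For context, here is a minimal standalone sketch of the new predicate.
Plain booleans stand in for the MachineFrameInfo and
SIMachineFunctionInfo queries, so the names below are illustrative, not
the actual LLVM API:

#include <iostream>

// Mirrors the condition in determineCalleeSavesSGPR() above: an FP is
// needed when the function makes a call and either has CSR VGPR spills
// or a caller-save VGPR reserved for SGPR spills (which now always gets
// a stack entry).
static bool willHaveFP(bool hasCalls, bool haveAnyCSRVGPR,
                       bool vgprReservedForSGPRSpill) {
  return hasCalls && (haveAnyCSRVGPR || vgprReservedForSGPRSpill);
}

int main() {
  // A function with a call and a reserved caller-save VGPR, but no CSR
  // VGPR spills, now requires a frame pointer; before this change it
  // did not.
  std::cout << willHaveFP(/*hasCalls=*/true, /*haveAnyCSRVGPR=*/false,
                          /*vgprReservedForSGPRSpill=*/true)
            << '\n'; // prints 1
  return 0;
}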

diff --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll
index 35d66181db126..4218e321b3b84 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll
@@ -266,27 +266,25 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_s33(i32 addrspace(1)*
 ; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 3
+; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    s_getpc_b64 s[4:5]
 ; GFX9-NEXT:    s_add_u32 s4, s4, external_void_func_void@rel32@lo+4
 ; GFX9-NEXT:    s_addc_u32 s5, s5, external_void_func_void@rel32@hi+12
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; def s33
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; use s33
 ; GFX9-NEXT:    ;;#ASMEND
-; GFX9-NEXT:    v_readlane_b32 s4, v40, 1
-; GFX9-NEXT:    v_readlane_b32 s33, v40, 0
-; GFX9-NEXT:    v_readlane_b32 s5, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
-; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[6:7], -1
 ; GFX9-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
 ; GFX9-NEXT:    s_mov_b64 exec, s[6:7]
@@ -301,27 +299,25 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_s33(i32 addrspace(1)*
 ; GFX10-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s4
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
+; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    s_getpc_b64 s[4:5]
 ; GFX10-NEXT:    s_add_u32 s4, s4, external_void_func_void@rel32@lo+4
 ; GFX10-NEXT:    s_addc_u32 s5, s5, external_void_func_void@rel32@hi+12
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 0
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; def s33
 ; GFX10-NEXT:    ;;#ASMEND
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; use s33
 ; GFX10-NEXT:    ;;#ASMEND
-; GFX10-NEXT:    v_readlane_b32 s4, v40, 1
-; GFX10-NEXT:    v_readlane_b32 s33, v40, 0
-; GFX10-NEXT:    v_readlane_b32 s5, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
-; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s6, -1
 ; GFX10-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3

diff --git a/llvm/test/CodeGen/AMDGPU/need-fp-from-csr-vgpr-spill.ll b/llvm/test/CodeGen/AMDGPU/need-fp-from-csr-vgpr-spill.ll
deleted file mode 100644
index ea39eb7af5bcb..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/need-fp-from-csr-vgpr-spill.ll
+++ /dev/null
@@ -1,118 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck %s
-
-; FP is in CSR range, modified.
-define hidden fastcc void @callee_has_fp() #1 {
-; CHECK-LABEL: callee_has_fp:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_mov_b32 s4, s33
-; CHECK-NEXT:    s_mov_b32 s33, s32
-; CHECK-NEXT:    s_addk_i32 s32, 0x200
-; CHECK-NEXT:    v_mov_b32_e32 v0, 1
-; CHECK-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:4
-; CHECK-NEXT:    s_waitcnt vmcnt(0)
-; CHECK-NEXT:    s_addk_i32 s32, 0xfe00
-; CHECK-NEXT:    s_mov_b32 s33, s4
-; CHECK-NEXT:    s_setpc_b64 s[30:31]
-  %alloca = alloca i32, addrspace(5)
-  store volatile i32 1, i32 addrspace(5)* %alloca
-  ret void
-}
-
-; Has no stack objects, but introduces them due to the CSR spill. We
-; see the FP modified in the callee with IPRA. We should not have
-; redundant spills of s33 or assert.
-define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
-; CHECK-LABEL: csr_vgpr_spill_fp_callee:
-; CHECK:       ; %bb.0: ; %bb
-; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_mov_b32 s8, s33
-; CHECK-NEXT:    s_mov_b32 s33, s32
-; CHECK-NEXT:    s_addk_i32 s32, 0x400
-; CHECK-NEXT:    s_getpc_b64 s[4:5]
-; CHECK-NEXT:    s_add_u32 s4, s4, callee_has_fp@rel32@lo+4
-; CHECK-NEXT:    s_addc_u32 s5, s5, callee_has_fp@rel32@hi+12
-; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s33 ; 4-byte Folded Spill
-; CHECK-NEXT:    s_mov_b64 s[6:7], s[30:31]
-; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; CHECK-NEXT:    ;;#ASMSTART
-; CHECK-NEXT:    ; clobber csr v40
-; CHECK-NEXT:    ;;#ASMEND
-; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
-; CHECK-NEXT:    s_addk_i32 s32, 0xfc00
-; CHECK-NEXT:    s_mov_b32 s33, s8
-; CHECK-NEXT:    s_waitcnt vmcnt(0)
-; CHECK-NEXT:    s_setpc_b64 s[6:7]
-bb:
-  call fastcc void @callee_has_fp()
-  call void asm sideeffect "; clobber csr v40", "~{v40}"()
-  ret void
-}
-
-define amdgpu_kernel void @kernel_call() {
-; CHECK-LABEL: kernel_call:
-; CHECK:       ; %bb.0: ; %bb
-; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
-; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
-; CHECK-NEXT:    s_add_u32 s0, s0, s7
-; CHECK-NEXT:    s_addc_u32 s1, s1, 0
-; CHECK-NEXT:    s_getpc_b64 s[4:5]
-; CHECK-NEXT:    s_add_u32 s4, s4, csr_vgpr_spill_fp_callee@rel32@lo+4
-; CHECK-NEXT:    s_addc_u32 s5, s5, csr_vgpr_spill_fp_callee@rel32@hi+12
-; CHECK-NEXT:    s_mov_b32 s32, 0
-; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; CHECK-NEXT:    s_endpgm
-bb:
-  tail call fastcc void @csr_vgpr_spill_fp_callee()
-  ret void
-}
-
-; Same, except with a tail call.
-define internal fastcc void @csr_vgpr_spill_fp_tailcall_callee() #0 {
-; CHECK-LABEL: csr_vgpr_spill_fp_tailcall_callee:
-; CHECK:       ; %bb.0: ; %bb
-; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
-; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
-; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
-; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
-; CHECK-NEXT:    ;;#ASMSTART
-; CHECK-NEXT:    ; clobber csr v40
-; CHECK-NEXT:    ;;#ASMEND
-; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; CHECK-NEXT:    v_writelane_b32 v1, s33, 0
-; CHECK-NEXT:    s_getpc_b64 s[4:5]
-; CHECK-NEXT:    s_add_u32 s4, s4, callee_has_fp@rel32@lo+4
-; CHECK-NEXT:    s_addc_u32 s5, s5, callee_has_fp@rel32@hi+12
-; CHECK-NEXT:    v_readlane_b32 s33, v1, 0
-; CHECK-NEXT:    s_or_saveexec_b64 s[6:7], -1
-; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; CHECK-NEXT:    s_mov_b64 exec, s[6:7]
-; CHECK-NEXT:    s_setpc_b64 s[4:5]
-bb:
-  call void asm sideeffect "; clobber csr v40", "~{v40}"()
-  tail call fastcc void @callee_has_fp()
-  ret void
-}
-
-define amdgpu_kernel void @kernel_tailcall() {
-; CHECK-LABEL: kernel_tailcall:
-; CHECK:       ; %bb.0: ; %bb
-; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
-; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
-; CHECK-NEXT:    s_add_u32 s0, s0, s7
-; CHECK-NEXT:    s_addc_u32 s1, s1, 0
-; CHECK-NEXT:    s_getpc_b64 s[4:5]
-; CHECK-NEXT:    s_add_u32 s4, s4, csr_vgpr_spill_fp_tailcall_callee@rel32@lo+4
-; CHECK-NEXT:    s_addc_u32 s5, s5, csr_vgpr_spill_fp_tailcall_callee@rel32@hi+12
-; CHECK-NEXT:    s_mov_b32 s32, 0
-; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; CHECK-NEXT:    s_endpgm
-bb:
-  tail call fastcc void @csr_vgpr_spill_fp_tailcall_callee()
-  ret void
-}
-
-attributes #0 = { "frame-pointer"="none" noinline }
-attributes #1 = { "frame-pointer"="all" noinline }

diff --git a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
new file mode 100644
index 0000000000000..47d38fbfd7365
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
@@ -0,0 +1,242 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -O0 -verify-machineinstrs < %s | FileCheck %s
+
+; FP is in CSR range, modified.
+define hidden fastcc void @callee_has_fp() #1 {
+; CHECK-LABEL: callee_has_fp:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_mov_b32 s4, s33
+; CHECK-NEXT:    s_mov_b32 s33, s32
+; CHECK-NEXT:    s_add_i32 s32, s32, 0x200
+; CHECK-NEXT:    v_mov_b32_e32 v0, 1
+; CHECK-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:4
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffe00
+; CHECK-NEXT:    s_mov_b32 s33, s4
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %alloca = alloca i32, addrspace(5)
+  store volatile i32 1, i32 addrspace(5)* %alloca
+  ret void
+}
+
+; Has no stack objects, but introduces them due to the CSR spill. We
+; see the FP modified in the callee with IPRA. We should not have
+; redundant spills of s33 or assert.
+define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
+; CHECK-LABEL: csr_vgpr_spill_fp_callee:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
+; CHECK-NEXT:    v_writelane_b32 v1, s33, 2
+; CHECK-NEXT:    s_mov_b32 s33, s32
+; CHECK-NEXT:    s_add_i32 s32, s32, 0x400
+; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s33 ; 4-byte Folded Spill
+; CHECK-NEXT:    v_writelane_b32 v1, s30, 0
+; CHECK-NEXT:    v_writelane_b32 v1, s31, 1
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee_has_fp@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee_has_fp@rel32@hi+12
+; CHECK-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
+; CHECK-NEXT:    v_readlane_b32 s31, v1, 1
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; clobber csr v40
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
+; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
+; CHECK-NEXT:    v_readlane_b32 s33, v1, 2
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+bb:
+  call fastcc void @callee_has_fp()
+  call void asm sideeffect "; clobber csr v40", "~{v40}"()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_call() {
+; CHECK-LABEL: kernel_call:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s7
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, csr_vgpr_spill_fp_callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, csr_vgpr_spill_fp_callee@rel32@hi+12
+; CHECK-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+bb:
+  tail call fastcc void @csr_vgpr_spill_fp_callee()
+  ret void
+}
+
+; Same, except with a tail call.
+define internal fastcc void @csr_vgpr_spill_fp_tailcall_callee() #0 {
+; CHECK-LABEL: csr_vgpr_spill_fp_tailcall_callee:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
+; CHECK-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
+; CHECK-NEXT:    v_writelane_b32 v1, s33, 0
+; CHECK-NEXT:    ;;#ASMSTART
+; CHECK-NEXT:    ; clobber csr v40
+; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, callee_has_fp@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, callee_has_fp@rel32@hi+12
+; CHECK-NEXT:    v_readlane_b32 s33, v1, 0
+; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
+; CHECK-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; CHECK-NEXT:    s_mov_b64 exec, s[6:7]
+; CHECK-NEXT:    s_setpc_b64 s[4:5]
+bb:
+  call void asm sideeffect "; clobber csr v40", "~{v40}"()
+  tail call fastcc void @callee_has_fp()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_tailcall() {
+; CHECK-LABEL: kernel_tailcall:
+; CHECK:       ; %bb.0: ; %bb
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s7
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, csr_vgpr_spill_fp_tailcall_callee@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, csr_vgpr_spill_fp_tailcall_callee@rel32@hi+12
+; CHECK-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+bb:
+  tail call fastcc void @csr_vgpr_spill_fp_tailcall_callee()
+  ret void
+}
+
+define hidden i32 @tail_call() #1 {
+; CHECK-LABEL: tail_call:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_mov_b32 s4, s33
+; CHECK-NEXT:    s_mov_b32 s33, s32
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_mov_b32 s33, s4
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  ret i32 0
+}
+
+define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
+; CHECK-LABEL: caller_save_vgpr_spill_fp_tail_call:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
+; CHECK-NEXT:    v_writelane_b32 v1, s33, 2
+; CHECK-NEXT:    s_mov_b32 s33, s32
+; CHECK-NEXT:    s_add_i32 s32, s32, 0x400
+; CHECK-NEXT:    v_writelane_b32 v1, s30, 0
+; CHECK-NEXT:    v_writelane_b32 v1, s31, 1
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, tail_call@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, tail_call@rel32@hi+12
+; CHECK-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
+; CHECK-NEXT:    v_readlane_b32 s31, v1, 1
+; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
+; CHECK-NEXT:    v_readlane_b32 s33, v1, 2
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %call = call i32 @tail_call()
+  ret i32 %call
+}
+
+define hidden i32 @caller_save_vgpr_spill_fp() #0 {
+; CHECK-LABEL: caller_save_vgpr_spill_fp:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; CHECK-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
+; CHECK-NEXT:    v_writelane_b32 v2, s33, 2
+; CHECK-NEXT:    s_mov_b32 s33, s32
+; CHECK-NEXT:    s_add_i32 s32, s32, 0x400
+; CHECK-NEXT:    v_writelane_b32 v2, s30, 0
+; CHECK-NEXT:    v_writelane_b32 v2, s31, 1
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, caller_save_vgpr_spill_fp_tail_call@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, caller_save_vgpr_spill_fp_tail_call@rel32@hi+12
+; CHECK-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    v_readlane_b32 s30, v2, 0
+; CHECK-NEXT:    v_readlane_b32 s31, v2, 1
+; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
+; CHECK-NEXT:    v_readlane_b32 s33, v2, 2
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
+; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %call = call i32 @caller_save_vgpr_spill_fp_tail_call()
+  ret i32 %call
+}
+
+define protected amdgpu_kernel void @kernel() {
+; CHECK-LABEL: kernel:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_mov_b32 s32, 0
+; CHECK-NEXT:    s_add_u32 flat_scratch_lo, s4, s7
+; CHECK-NEXT:    s_addc_u32 flat_scratch_hi, s5, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s7
+; CHECK-NEXT:    s_addc_u32 s1, s1, 0
+; CHECK-NEXT:    s_getpc_b64 s[4:5]
+; CHECK-NEXT:    s_add_u32 s4, s4, caller_save_vgpr_spill_fp@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s5, s5, caller_save_vgpr_spill_fp@rel32@hi+12
+; CHECK-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; CHECK-NEXT:    s_mov_b64 s[8:9], s[0:1]
+; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
+; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT:    s_endpgm
+entry:
+  %call = call i32 @caller_save_vgpr_spill_fp()
+  ret void
+}
+
+attributes #0 = { "frame-pointer"="none" noinline }
+attributes #1 = { "frame-pointer"="all" noinline }

More information about the llvm-commits mailing list