[llvm] 7b76a5c - AMDGPU: Fix fixed ABI SGPR arguments

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 6 06:01:27 PDT 2020


Author: Matt Arsenault
Date: 2020-07-06T09:01:18-04:00
New Revision: 7b76a5c8a2a66684bffb19b37e851ebd39519541

URL: https://github.com/llvm/llvm-project/commit/7b76a5c8a2a66684bffb19b37e851ebd39519541
DIFF: https://github.com/llvm/llvm-project/commit/7b76a5c8a2a66684bffb19b37e851ebd39519541.diff

LOG: AMDGPU: Fix fixed ABI SGPR arguments

The default constructor wasn't setting isSet on the ArgDescriptor, so
while these had the value set, they were treated as missing. This only
ended up mattering in the indirect call case (and for regular calls in
GlobalISel, which currently doesn't have a way to support the variable
ABI).

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
    llvm/test/CodeGen/AMDGPU/indirect-call.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
index 69e48227e732..f41e774b34b4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUArgumentUsageInfo.cpp
@@ -142,19 +142,20 @@ AMDGPUFunctionArgInfo::getPreloadedValue(
 
 constexpr AMDGPUFunctionArgInfo AMDGPUFunctionArgInfo::fixedABILayout() {
   AMDGPUFunctionArgInfo AI;
-  AI.PrivateSegmentBuffer = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
-  AI.DispatchPtr = AMDGPU::SGPR4_SGPR5;
-  AI.QueuePtr = AMDGPU::SGPR6_SGPR7;
+  AI.PrivateSegmentBuffer
+    = ArgDescriptor::createRegister(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3);
+  AI.DispatchPtr = ArgDescriptor::createRegister(AMDGPU::SGPR4_SGPR5);
+  AI.QueuePtr = ArgDescriptor::createRegister(AMDGPU::SGPR6_SGPR7);
 
   // Do not pass kernarg segment pointer, only pass increment version in its
   // place.
-  AI.ImplicitArgPtr = AMDGPU::SGPR8_SGPR9;
-  AI.DispatchID = AMDGPU::SGPR10_SGPR11;
+  AI.ImplicitArgPtr = ArgDescriptor::createRegister(AMDGPU::SGPR8_SGPR9);
+  AI.DispatchID = ArgDescriptor::createRegister(AMDGPU::SGPR10_SGPR11);
 
   // Skip FlatScratchInit/PrivateSegmentSize
-  AI.WorkGroupIDX = AMDGPU::SGPR12;
-  AI.WorkGroupIDY = AMDGPU::SGPR13;
-  AI.WorkGroupIDZ = AMDGPU::SGPR14;
+  AI.WorkGroupIDX = ArgDescriptor::createRegister(AMDGPU::SGPR12);
+  AI.WorkGroupIDY = ArgDescriptor::createRegister(AMDGPU::SGPR13);
+  AI.WorkGroupIDZ = ArgDescriptor::createRegister(AMDGPU::SGPR14);
 
   const unsigned Mask = 0x3ff;
   AI.WorkItemIDX = ArgDescriptor::createRegister(AMDGPU::VGPR31, Mask);

diff  --git a/llvm/test/CodeGen/AMDGPU/indirect-call.ll b/llvm/test/CodeGen/AMDGPU/indirect-call.ll
index 8432d2961f04..dacc77b49992 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-call.ll
@@ -81,16 +81,19 @@ define amdgpu_kernel void @test_indirect_call_sgpr_ptr() {
 ; GCN-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; GCN-NEXT:    s_add_u32 s0, s0, s17
 ; GCN-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-NEXT:    s_getpc_b64 s[4:5]
-; GCN-NEXT:    s_add_u32 s4, s4, gv.fptr0@rel32@lo+4
-; GCN-NEXT:    s_addc_u32 s5, s5, gv.fptr0@rel32@hi+4
-; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_mov_b32 s13, s15
+; GCN-NEXT:    s_mov_b32 s12, s14
+; GCN-NEXT:    s_getpc_b64 s[14:15]
+; GCN-NEXT:    s_add_u32 s14, s14, gv.fptr0@rel32@lo+4
+; GCN-NEXT:    s_addc_u32 s15, s15, gv.fptr0@rel32@hi+4
+; GCN-NEXT:    s_load_dwordx2 s[18:19], s[14:15], 0x0
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
 ; GCN-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GCN-NEXT:    v_or_b32_e32 v31, v0, v2
+; GCN-NEXT:    s_mov_b32 s14, s16
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GCN-NEXT:    s_swappc_b64 s[30:31], s[18:19]
 ; GCN-NEXT:    s_endpgm
   %fptr = load void()*, void()* addrspace(4)* @gv.fptr0
   call void %fptr()
@@ -174,17 +177,20 @@ define amdgpu_kernel void @test_indirect_call_sgpr_ptr_arg() {
 ; GCN-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; GCN-NEXT:    s_add_u32 s0, s0, s17
 ; GCN-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-NEXT:    s_getpc_b64 s[4:5]
-; GCN-NEXT:    s_add_u32 s4, s4, gv.fptr1@rel32@lo+4
-; GCN-NEXT:    s_addc_u32 s5, s5, gv.fptr1@rel32@hi+4
+; GCN-NEXT:    s_mov_b32 s13, s15
+; GCN-NEXT:    s_mov_b32 s12, s14
+; GCN-NEXT:    s_getpc_b64 s[14:15]
+; GCN-NEXT:    s_add_u32 s14, s14, gv.fptr1@rel32@lo+4
+; GCN-NEXT:    s_addc_u32 s15, s15, gv.fptr1@rel32@hi+4
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 20, v2
-; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GCN-NEXT:    s_load_dwordx2 s[18:19], s[14:15], 0x0
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 10, v1
 ; GCN-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GCN-NEXT:    v_or_b32_e32 v31, v0, v2
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GCN-NEXT:    s_mov_b32 s14, s16
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GCN-NEXT:    s_swappc_b64 s[30:31], s[18:19]
 ; GCN-NEXT:    s_endpgm
   %fptr = load void(i32)*, void(i32)* addrspace(4)* @gv.fptr1
   call void %fptr(i32 123)


        


More information about the llvm-commits mailing list