[llvm] a065a01 - [AMDGPU] Allow use of StackPtrOffsetReg when building spills

Carl Ritson via llvm-commits llvm-commits at lists.llvm.org
Fri May 15 19:55:29 PDT 2020


Author: Carl Ritson
Date: 2020-05-16T11:54:43+09:00
New Revision: a065a01bf715e4a5cf1b532ea50a4a6d877eeed7

URL: https://github.com/llvm/llvm-project/commit/a065a01bf715e4a5cf1b532ea50a4a6d877eeed7
DIFF: https://github.com/llvm/llvm-project/commit/a065a01bf715e4a5cf1b532ea50a4a6d877eeed7.diff

LOG: [AMDGPU] Allow use of StackPtrOffsetReg when building spills

Summary:
When spilling in the entry function, we should be able to borrow
StackPtrOffsetReg as a last resort.  This restores behaviour
removed in D75138, and fixes failures when shaders use all
SGPRs and VGPRs and must spill in the entry function.
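
In outline, the offset register for the spill access is now chosen in
three steps.  Here is a minimal sketch of that order, condensed from the
patch below (local names as in SIRegisterInfo::buildSpillLoadStore; the
BuildMI calls that consume the register are elided):

  // 1. Try to scavenge a free SGPR to hold the scratch offset.
  SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
  if (!SOffset) {
    // 2. Otherwise borrow a register that is already live: the scratch
    //    offset register if there is one, else (new in this patch) the
    //    stack pointer.  Record the delta so it can be undone after the
    //    access.
    if (!ScratchOffsetReg)
      ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
    SOffset = ScratchOffsetReg;
    ScratchOffsetRegDelta = Offset;
  }
  // 3. Only if even that yields no register is the spill a fatal error.
  if (!SOffset)
    report_fatal_error("could not scavenge SGPR to spill in entry function");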

Reviewers: scott.linder, arsenm, tpr

Reviewed By: scott.linder, arsenm

Subscribers: qcolombet, foad, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, t-tye, hiraditya, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79776

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
    llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 8a33a13bdbe4..f528b804b1d1 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -687,6 +687,7 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
   MachineFunction *MF = MI->getParent()->getParent();
   const SIInstrInfo *TII = ST.getInstrInfo();
   const MachineFrameInfo &MFI = MF->getFrameInfo();
+  const SIMachineFunctionInfo *FuncInfo = MF->getInfo<SIMachineFunctionInfo>();
 
   const MCInstrDesc &Desc = TII->get(LoadStoreOp);
   const DebugLoc &DL = MI->getDebugLoc();
@@ -725,22 +726,24 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
       SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);
 
     if (!SOffset) {
-      if (!ScratchOffsetReg) {
-        report_fatal_error("could not scavenge SGPR to spill in entry function");
-      }
      // There are no free SGPRs, and we are in the process of spilling
      // VGPRs too.  Since we need a VGPR in order to spill SGPRs (this is
      // true on SI/CI, and on VI until we implement spilling using scalar
      // stores), we have no way to free up an SGPR.  Our solution here is to
-      // add the offset directly to the ScratchOffset register, and then
-      // subtract the offset after the spill to return ScratchOffset to it's
-      // original value.
+      // add the offset directly to the ScratchOffset or StackPtrOffset
+      // register, and then subtract the offset after the spill to return the
+      // register to its original value.
+      if (!ScratchOffsetReg)
+        ScratchOffsetReg = FuncInfo->getStackPtrOffsetReg();
       SOffset = ScratchOffsetReg;
       ScratchOffsetRegDelta = Offset;
     } else {
       Scavenged = true;
     }
 
+    if (!SOffset)
+      report_fatal_error("could not scavenge SGPR to spill in entry function");
+
     if (ScratchOffsetReg == AMDGPU::NoRegister) {
       BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), SOffset)
           .addImm(Offset);
@@ -811,8 +814,8 @@ void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
 
   if (ScratchOffsetRegDelta != 0) {
     // Subtract the offset we added to the ScratchOffset register.
-    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
-        .addReg(ScratchOffsetReg)
+    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
+        .addReg(SOffset)
         .addImm(ScratchOffsetRegDelta);
   }
 }
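
For illustration, when the stack pointer is the fallback, the spill
access ends up bracketed by an add/subtract pair that temporarily folds
the frame offset into the borrowed register and then restores it, so the
register holds its original value for the rest of the function.  Here is
a sketch of that bracketing, gathered into one place for clarity rather
than showing the exact BuildMI sites (in the real code the add is
emitted where the offset is materialized and the subtract at the end of
buildSpillLoadStore; the s_add_u32/s_sub_u32 comments follow the CHECK
lines in the test update below):

  // s_add_u32 s32, s32, 0x84100  ; fold the frame offset into s32
  BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(SOffset)
      .addImm(Offset);
  // ... the buffer load/store that uses SOffset as its soffset
  // operand is built here ...
  // s_sub_u32 s32, s32, 0x84100  ; restore s32 to its original value
  BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), SOffset)
      .addReg(SOffset)
      .addImm(ScratchOffsetRegDelta);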

diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
index a23461a0a514..323612868772 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -1,6 +1,6 @@
-; RUN: llc -march=amdgcn -mcpu=verde -enable-misched=0 -post-RA-scheduler=0 < %s | FileCheck %s
-; RUN: llc -regalloc=basic -march=amdgcn -mcpu=tonga -enable-misched=0 -post-RA-scheduler=0 < %s | FileCheck %s
- ;
+; RUN: llc -march=amdgcn -mcpu=verde -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 < %s | FileCheck -check-prefixes=CHECK,GFX6 %s
+; RUN: llc -regalloc=basic -march=amdgcn -mcpu=tonga -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 < %s | FileCheck -check-prefixes=CHECK,GFX7 %s
+;
 ; There is something about Tonga that causes this test to spend a lot of time
 ; in the default register allocator.
 
@@ -34,7 +34,65 @@ entry:
   ret void
 }
 
+; CHECK-LABEL: test_limited_sgpr
+; GFX6: s_add_u32 s32, s32, 0x84100
+; GFX6-NEXT: buffer_load_dword v{{[0-9]+}}, off, s[{{[0-9:]+}}], s32
+; GFX6-NEXT: s_sub_u32 s32, s32, 0x84100
+; GFX6: NumSgprs: 48
+; GFX6: ScratchSize: 8624
+define amdgpu_kernel void @test_limited_sgpr(<64 x i32> addrspace(1)* %out, <64 x i32> addrspace(1)* %in) #0 {
+entry:
+  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+  %tid = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
+
+; allocate enough scratch to go beyond 2^12 addressing
+  %scratch = alloca <1280 x i32>, align 8, addrspace(5)
+
+; load VGPR data
+  %aptr = getelementptr <64 x i32>, <64 x i32> addrspace(1)* %in, i32 %tid
+  %a = load <64 x i32>, <64 x i32> addrspace(1)* %aptr
+
+; make sure scratch is used
+  %x = extractelement <64 x i32> %a, i32 0
+  %sptr0 = getelementptr <1280 x i32>, <1280 x i32> addrspace(5)* %scratch, i32 %x, i32 0
+  store i32 1, i32 addrspace(5)* %sptr0
+
+; fill up SGPRs
+  %sgpr0 = call <8 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr1 = call <8 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr2 = call <8 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr3 = call <8 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr4 = call <4 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr5 = call <2 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr6 = call <2 x i32> asm sideeffect "; def $0", "=s" ()
+  %sgpr7 = call i32 asm sideeffect "; def $0", "=s" ()
+
+  %cmp = icmp eq i32 %x, 0
+  br i1 %cmp, label %bb0, label %ret
+
+bb0:
+; create SGPR pressure
+  call void asm sideeffect "; use $0,$1,$2,$3,$4,$5,$6", "s,s,s,s,s,s,s,s"(<8 x i32> %sgpr0, <8 x i32> %sgpr1, <8 x i32> %sgpr2, <8 x i32> %sgpr3, <4 x i32> %sgpr4, <2 x i32> %sgpr5, <2 x i32> %sgpr6, i32 %sgpr7)
+
+; mark most VGPR registers as used to increase register pressure
+  call void asm sideeffect "", "~{v4},~{v8},~{v12},~{v16},~{v20},~{v24},~{v28},~{v32}" ()
+  call void asm sideeffect "", "~{v36},~{v40},~{v44},~{v48},~{v52},~{v56},~{v60},~{v64}" ()
+  call void asm sideeffect "", "~{v68},~{v72},~{v76},~{v80},~{v84},~{v88},~{v92},~{v96}" ()
+  call void asm sideeffect "", "~{v100},~{v104},~{v108},~{v112},~{v116},~{v120},~{v124},~{v128}" ()
+  call void asm sideeffect "", "~{v132},~{v136},~{v140},~{v144},~{v148},~{v152},~{v156},~{v160}" ()
+  call void asm sideeffect "", "~{v164},~{v168},~{v172},~{v176},~{v180},~{v184},~{v188},~{v192}" ()
+  call void asm sideeffect "", "~{v196},~{v200},~{v204},~{v208},~{v212},~{v216},~{v220},~{v224}" ()
+  br label %ret
+
+ret:
+  %outptr = getelementptr <64 x i32>, <64 x i32> addrspace(1)* %out, i32 %tid
+  store <64 x i32> %a, <64 x i32> addrspace(1)* %outptr
+
+  ret void
+}
+
 declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
 declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32) #1
 
+attributes #0 = { "amdgpu-waves-per-eu"="10,10" }
 attributes #1 = { nounwind readnone }

