[llvm] r260427 - AMDGPU: Release the scavenged offset register during VGPR spill

Nicolai Haehnle via llvm-commits <llvm-commits@lists.llvm.org>
Wed Feb 10 12:13:58 PST 2016


Author: nha
Date: Wed Feb 10 14:13:58 2016
New Revision: 260427

URL: http://llvm.org/viewvc/llvm-project?rev=260427&view=rev
Log:
AMDGPU: Release the scavenged offset register during VGPR spill

Summary:
This fixes a crash where subsequent spills would be unable to scavenge
a register. In particular, it fixes a crash in piglit's
spec@glsl-1.50@execution@geometry@max-input-components (the test still
has a shader that fails to compile because of too many SGPR spills, but
at least it doesn't crash any more).

This is a candidate for the release branch.

Reviewers: arsenm, tstellarAMD

Subscribers: qcolombet, arsenm

Differential Revision: http://reviews.llvm.org/D16558
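
For reference, here is the post-patch logic in one piece (a simplified
sketch stitched together from the hunks below; the scavenge call and the
isUInt<12> check are paraphrased from the surrounding function and are
not part of this diff):

  unsigned SOffset = ScratchOffset;
  bool Scavenged = false;

  if (!isUInt<12>(Offset + Size)) {
    // The MUBUF immediate offset field is only 12 bits wide, so a large
    // spill offset has to be materialized into an SGPR first.
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    } else {
      Scavenged = true;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) : Value;

    // Kill the scavenged SGPR on its last use so the scavenger knows it
    // is free again for the next spill in this block.
    unsigned SOffsetRegState = 0;
    if (i + 1 == e && Scavenged)
      SOffsetRegState |= RegState::Kill;

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(IsLoad))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset, SOffsetRegState)
      .addImm(Offset); // remaining MUBUF operands (glc, slc, ...) omitted
  }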

Added:
    llvm/trunk/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp?rev=260427&r1=260426&r2=260427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp Wed Feb 10 14:13:58 2016
@@ -234,6 +234,7 @@ void SIRegisterInfo::buildScratchLoadSto
   bool IsLoad = TII->get(LoadStoreOp).mayLoad();
 
   bool RanOutOfSGPRs = false;
+  bool Scavenged = false;
   unsigned SOffset = ScratchOffset;
 
   unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
@@ -244,6 +245,8 @@ void SIRegisterInfo::buildScratchLoadSto
     if (SOffset == AMDGPU::NoRegister) {
       RanOutOfSGPRs = true;
       SOffset = AMDGPU::SGPR0;
+    } else {
+      Scavenged = true;
     }
     BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
             .addReg(ScratchOffset)
@@ -259,10 +262,14 @@ void SIRegisterInfo::buildScratchLoadSto
         getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
         Value;
 
+    unsigned SOffsetRegState = 0;
+    if (i + 1 == e && Scavenged)
+      SOffsetRegState |= RegState::Kill;
+
     BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
       .addReg(SubReg, getDefRegState(IsLoad))
       .addReg(ScratchRsrcReg)
-      .addReg(SOffset)
+      .addReg(SOffset, SOffsetRegState)
       .addImm(Offset)
       .addImm(0) // glc
       .addImm(0) // slc
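
Why the kill flag fixes the crash: the register scavenger walks each
basic block forward and relies on kill flags to return registers to its
pool of available registers. Very roughly, the idea is (a heavily
simplified sketch, not the actual code in
lib/CodeGen/RegisterScavenging.cpp):

  // Simplified model of the scavenger's forward scan: a use that
  // carries a kill flag frees the register, a def occupies it.
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isUse() && MO.isKill())
      RegsAvailable.set(MO.getReg());   // free again after this use
    else if (MO.isDef())
      RegsAvailable.reset(MO.getReg()); // occupied from here on
  }

Without the kill flag on its last use, the scavenged SGPR never
re-entered the available set, so each spill consumed an SGPR for good
and a later spill eventually found nothing left to scavenge.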

Added: llvm/trunk/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/spill-scavenge-offset.ll?rev=260427&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/spill-scavenge-offset.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/spill-scavenge-offset.ll Wed Feb 10 14:13:58 2016
@@ -0,0 +1,33 @@
+; RUN: llc -march=amdgcn -mcpu=verde < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck %s
+
+; When the offset of a VGPR spill into scratch space gets too large, an
+; additional SGPR is used to calculate the scratch load/store address. Make
+; sure that this mechanism works even when many spills happen.
+
+; Just test that it compiles successfully.
+; CHECK-LABEL: test
+define void @test(<1280 x i32> addrspace(1)* %out, <1280 x i32> addrspace(1)* %in,
+                  <96 x i32> addrspace(1)* %sdata_out, <96 x i32> %sdata_in) {
+entry:
+  %tid = call i32 @llvm.SI.tid() nounwind readnone
+
+  %aptr = getelementptr <1280 x i32>, <1280 x i32> addrspace(1)* %in, i32 %tid
+  %a = load <1280 x i32>, <1280 x i32> addrspace(1)* %aptr
+
+; mark most VGPR registers as used to increase register pressure
+  call void asm sideeffect "", "~{VGPR4},~{VGPR8},~{VGPR12},~{VGPR16},~{VGPR20},~{VGPR24},~{VGPR28},~{VGPR32}" ()
+  call void asm sideeffect "", "~{VGPR36},~{VGPR40},~{VGPR44},~{VGPR48},~{VGPR52},~{VGPR56},~{VGPR60},~{VGPR64}" ()
+  call void asm sideeffect "", "~{VGPR68},~{VGPR72},~{VGPR76},~{VGPR80},~{VGPR84},~{VGPR88},~{VGPR92},~{VGPR96}" ()
+  call void asm sideeffect "", "~{VGPR100},~{VGPR104},~{VGPR108},~{VGPR112},~{VGPR116},~{VGPR120},~{VGPR124},~{VGPR128}" ()
+  call void asm sideeffect "", "~{VGPR132},~{VGPR136},~{VGPR140},~{VGPR144},~{VGPR148},~{VGPR152},~{VGPR156},~{VGPR160}" ()
+  call void asm sideeffect "", "~{VGPR164},~{VGPR168},~{VGPR172},~{VGPR176},~{VGPR180},~{VGPR184},~{VGPR188},~{VGPR192}" ()
+  call void asm sideeffect "", "~{VGPR196},~{VGPR200},~{VGPR204},~{VGPR208},~{VGPR212},~{VGPR216},~{VGPR220},~{VGPR224}" ()
+
+  %outptr = getelementptr <1280 x i32>, <1280 x i32> addrspace(1)* %out, i32 %tid
+  store <1280 x i32> %a, <1280 x i32> addrspace(1)* %outptr
+
+  ret void
+}
+
+declare i32 @llvm.SI.tid() nounwind readnone
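
A note on how the test works: the inline asm clobbers tie up most of
the VGPR file, so the <1280 x i32> value has to be spilled to scratch,
and a value that large overflows the 12-bit unsigned immediate offset
of the MUBUF scratch instructions, which forces the scavenged-SGPR
addressing path exercised by this patch. A back-of-the-envelope check
(standalone C++, assuming the 12-bit field):

  #include <cstdio>

  int main() {
    unsigned SpillBytes = 1280 * 4;         // scratch bytes for <1280 x i32>
    unsigned MaxImmOffset = (1u << 12) - 1; // 4095, the 12-bit MUBUF field
    std::printf("%u bytes vs. %u max -> SGPR path %s\n", SpillBytes,
                MaxImmOffset, SpillBytes > MaxImmOffset ? "taken" : "not taken");
    return 0;
  }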
