[llvm] r264589 - AMDGPU/SI: Limit load clustering to 16 bytes instead of 4 instructions

Tom Stellard via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 28 09:10:13 PDT 2016


Author: tstellar
Date: Mon Mar 28 11:10:13 2016
New Revision: 264589

URL: http://llvm.org/viewvc/llvm-project?rev=264589&view=rev
Log:
AMDGPU/SI: Limit load clustering to 16 bytes instead of 4 instructions

Summary:
This helps prevent load clustering from drastically increasing register
pressure, as happened when four SMRDx8 loads were clustered together.
The limit of 16 bytes was chosen because it appears to match the
original intent of the 4-instruction limit, but further analysis could
show that a different limit is better.

This fix yields small decreases in register usage with shader-db, and
also helps avoid a large increase in register usage when lane mask
tracking is enabled in the machine scheduler, since lane mask tracking
creates more opportunities for load clustering.
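
To make the arithmetic concrete, here is a minimal standalone sketch of
the byte-based check. shouldCluster and the hard-coded register-class
sizes are illustrative stand-ins; in-tree the destination size comes
from TargetRegisterClass::getSize().

#include <cstdio>

// Budget for the total bytes loaded by one cluster.
static const unsigned LoadClusterThreshold = 16;

// True if NumLoads loads, each writing DstSizeInBytes, fit the budget.
static bool shouldCluster(unsigned NumLoads, unsigned DstSizeInBytes) {
  return NumLoads * DstSizeInBytes <= LoadClusterThreshold;
}

int main() {
  // Four s_load_dword results (SReg_32, 4 bytes each): 16 bytes, clusters.
  printf("4 x dword:   %d\n", shouldCluster(4, 4));
  // Two s_load_dwordx2 results (SReg_64, 8 bytes each): 16 bytes, clusters.
  printf("2 x dwordx2: %d\n", shouldCluster(2, 8));
  // Two s_load_dwordx8 results (SReg_256, 32 bytes each): 64 bytes,
  // rejected, although the old 4-instruction limit would have allowed it.
  printf("2 x dwordx8: %d\n", shouldCluster(2, 32));
  return 0;
}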

shader-db stats:

2379 shaders in 477 tests
Totals:
SGPRS: 49744 -> 48600 (-2.30 %)
VGPRS: 34120 -> 34076 (-0.13 %)
Code Size: 1282888 -> 1283184 (0.02 %) bytes
LDS: 28 -> 28 (0.00 %) blocks
Scratch: 495616 -> 492544 (-0.62 %) bytes per wave
Max Waves: 6843 -> 6853 (0.15 %)
Wait states: 0 -> 0 (0.00 %)

Reviewers: nhaehnle, arsenm

Subscribers: arsenm, llvm-commits

Differential Revision: http://reviews.llvm.org/D18451

Modified:
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll
    llvm/trunk/test/CodeGen/AMDGPU/madak.ll
    llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp?rev=264589&r1=264588&r2=264589&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp Mon Mar 28 11:10:13 2016
@@ -295,18 +295,43 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(
 bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                      MachineInstr *SecondLdSt,
                                      unsigned NumLoads) const {
-  // TODO: This needs finer tuning
-  if (NumLoads > 4)
+  const MachineOperand *FirstDst = nullptr;
+  const MachineOperand *SecondDst = nullptr;
+
+  if (isDS(*FirstLdSt) && isDS(*SecondLdSt)) {
+    FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::vdst);
+    SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::vdst);
+  }
+
+  if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt)) {
+    FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::sdst);
+    SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::sdst);
+  }
+
+  if ((isMUBUF(*FirstLdSt) && isMUBUF(*SecondLdSt)) ||
+      (isMTBUF(*FirstLdSt) && isMTBUF(*SecondLdSt))) {
+    FirstDst = getNamedOperand(*FirstLdSt, AMDGPU::OpName::vdata);
+    SecondDst = getNamedOperand(*SecondLdSt, AMDGPU::OpName::vdata);
+  }
+
+  if (!FirstDst || !SecondDst)
     return false;
 
-  if (isDS(*FirstLdSt) && isDS(*SecondLdSt))
-    return true;
+  // Try to limit clustering based on the total number of bytes loaded
+  // rather than the number of instructions.  This is done to help reduce
+  // register pressure.  The method used is somewhat inexact, though,
+  // because it assumes that all loads in the cluster will load the
+  // same number of bytes as FirstLdSt.
+
+  // The unit of this value is bytes.
+  // FIXME: This needs finer tuning.
+  unsigned LoadClusterThreshold = 16;
 
-  if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt))
-    return true;
+  const MachineRegisterInfo &MRI =
+      FirstLdSt->getParent()->getParent()->getRegInfo();
+  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
 
-  return (isMUBUF(*FirstLdSt) || isMTBUF(*FirstLdSt)) &&
-    (isMUBUF(*SecondLdSt) || isMTBUF(*SecondLdSt));
+  return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
 }
 
 void

Modified: llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll?rev=264589&r1=264588&r2=264589&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/ctpop.ll Mon Mar 28 11:10:13 2016
@@ -60,7 +60,7 @@ define void @v_ctpop_add_chain_i32(i32 a
 
 ; FUNC-LABEL: {{^}}v_ctpop_add_sgpr_i32:
 ; GCN: buffer_load_dword [[VAL0:v[0-9]+]],
-; GCN-NEXT: s_waitcnt
+; GCN: s_waitcnt
 ; GCN-NEXT: v_bcnt_u32_b32_e64 [[RESULT:v[0-9]+]], [[VAL0]], s{{[0-9]+}}
 ; GCN-NEXT: buffer_store_dword [[RESULT]],
 ; GCN: s_endpgm

Modified: llvm/trunk/test/CodeGen/AMDGPU/madak.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/madak.ll?rev=264589&r1=264588&r2=264589&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/madak.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/madak.ll Mon Mar 28 11:10:13 2016
@@ -101,7 +101,7 @@ define void @madak_inline_imm_f32(float
 
 ; We can't use an SGPR when forming madak
 ; GCN-LABEL: {{^}}s_v_madak_f32:
-; GCN: s_load_dword [[SB:s[0-9]+]]
+; GCN-DAG: s_load_dword [[SB:s[0-9]+]]
 ; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x41200000
 ; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]]
 ; GCN-NOT: v_madak_f32

Modified: llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll?rev=264589&r1=264588&r2=264589&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/schedule-kernel-arg-loads.ll Mon Mar 28 11:10:13 2016
@@ -2,17 +2,19 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=FUNC -check-prefix=VI --check-prefix=GCN %s
 
 ; FUNC-LABEL: {{^}}cluster_arg_loads:
+; FIXME: Due to changes in the load clustering heuristics, we no longer
+;        cluster all argument loads together.
+; SI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
+; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
 ; SI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x9
 ; SI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xb
-; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd
-; SI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0xe
-; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
+; VI: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
 ; VI-NEXT: s_nop 0
-; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
+; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
 ; VI-NEXT: s_nop 0
-; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x34
+; VI: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x24
 ; VI-NEXT: s_nop 0
-; VI-NEXT: s_load_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0x38
+; VI-NEXT: s_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x2c
 define void @cluster_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) nounwind {
   store i32 %x, i32 addrspace(1)* %out0, align 4
   store i32 %y, i32 addrspace(1)* %out1, align 4



