[llvm] r214943 - R600: Increase nearby load scheduling threshold.

Matt Arsenault Matthew.Arsenault at amd.com
Tue Aug 5 17:29:49 PDT 2014


Author: arsenm
Date: Tue Aug  5 19:29:49 2014
New Revision: 214943

URL: http://llvm.org/viewvc/llvm-project?rev=214943&view=rev
Log:
R600: Increase nearby load scheduling threshold.

This partially fixes the weird-looking load scheduling
in the memcpy test. The load clustering doesn't seem
particularly smart, but this method seems to be partially
deprecated, so it might not be worth trying to fix.
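
For reference, a minimal standalone sketch of the updated heuristic
(the helper name and the driver below are illustrative only, not part
of the patch): two loads 32 bytes apart are now candidates for
clustering, where the old 16-byte threshold would have kept them apart,
and the cap on the number of clustered loads still applies.

#include <cstdint>
#include <cstdio>

// Same check as the new AMDGPUInstrInfo::shouldScheduleLoadsNear body:
// at most 16 loads, with offsets inside a 64-byte (global memory) cacheline.
static bool shouldClusterLoads(int64_t Offset0, int64_t Offset1,
                               unsigned NumLoads) {
  return NumLoads <= 16 && (Offset1 - Offset0) < 64;
}

int main() {
  // 32 bytes apart: rejected by the old 16-byte threshold, accepted now.
  std::printf("0 and 32, 2 loads:  %d\n", shouldClusterLoads(0, 32, 2));
  // A full cacheline apart: still not clustered.
  std::printf("0 and 64, 2 loads:  %d\n", shouldClusterLoads(0, 64, 2));
  // Too many loads in a row: the count limit still rejects clustering.
  std::printf("0 and 8, 17 loads:  %d\n", shouldClusterLoads(0, 8, 17));
  return 0;
}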

Modified:
    llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp
    llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll

Modified: llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp?rev=214943&r1=214942&r2=214943&view=diff
==============================================================================
--- llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/R600/AMDGPUInstrInfo.cpp Tue Aug  5 19:29:49 2014
@@ -218,15 +218,26 @@ bool AMDGPUInstrInfo::enableClusterLoads
   return true;
 }
 
-bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
-                                             int64_t Offset1, int64_t Offset2,
-                                             unsigned NumLoads) const {
-  assert(Offset2 > Offset1
-         && "Second offset should be larger than first offset!");
-  // If we have less than 16 loads in a row, and the offsets are within 16,
-  // then schedule together.
-  // TODO: Make the loads schedule near if it fits in a cacheline
-  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
+// FIXME: This behaves strangely. With 32 loads + stores, for example, the
+// first 16 loads will be interleaved with the stores, and the next 16 will be
+// clustered as expected. It should split these into two 16-store batches.
+//
+// Loads are clustered until this returns false, rather than trying to
+// schedule groups of stores. This also means we have to deal with saying
+// that loads from different address spaces should be clustered, and ones
+// which might cause bank conflicts.
+//
+// This might be deprecated, so it might not be worth much effort to fix.
+bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
+                                              int64_t Offset0, int64_t Offset1,
+                                              unsigned NumLoads) const {
+  assert(Offset1 > Offset0 &&
+         "Second offset should be larger than first offset!");
+  // If we have 16 or fewer loads in a row, and the offsets are within 64
+  // bytes, then schedule together.
+
+  // A cacheline is 64 bytes (for global memory).
+  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
 }
 
 bool

Modified: llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll?rev=214943&r1=214942&r2=214943&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll (original)
+++ llvm/trunk/test/CodeGen/R600/llvm.memcpy.ll Tue Aug  5 19:29:49 2014
@@ -15,17 +15,18 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(
 ; SI: DS_WRITE_B8
 ; SI: DS_READ_U8
 ; SI: DS_WRITE_B8
+
 ; SI: DS_READ_U8
 ; SI: DS_WRITE_B8
 ; SI: DS_READ_U8
 ; SI: DS_WRITE_B8
 ; SI: DS_READ_U8
 ; SI: DS_WRITE_B8
-
 ; SI: DS_READ_U8
 ; SI: DS_WRITE_B8
 ; SI: DS_READ_U8
 ; SI: DS_WRITE_B8
+
 ; SI: DS_READ_U8
 ; SI: DS_WRITE_B8
 ; SI: DS_READ_U8
@@ -35,9 +36,8 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(
 ; SI: DS_READ_U8
 ; SI: DS_WRITE_B8
 ; SI: DS_READ_U8
-; SI: DS_WRITE_B8
 ; SI: DS_READ_U8
-; SI: DS_WRITE_B8
+
 
 ; SI: DS_READ_U8
 ; SI: DS_READ_U8
@@ -47,6 +47,7 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(
 ; SI: DS_READ_U8
 ; SI: DS_READ_U8
 ; SI: DS_READ_U8
+
 ; SI: DS_READ_U8
 ; SI: DS_READ_U8
 ; SI: DS_READ_U8
@@ -65,6 +66,9 @@ declare void @llvm.memcpy.p1i8.p1i8.i64(
 ; SI: DS_WRITE_B8
 ; SI: DS_WRITE_B8
 ; SI: DS_WRITE_B8
+
+; SI: DS_WRITE_B8
+; SI: DS_WRITE_B8
 ; SI: DS_WRITE_B8
 ; SI: DS_WRITE_B8
 ; SI: DS_WRITE_B8
@@ -83,21 +87,13 @@ define void @test_small_memcpy_i64_lds_t
 
 ; FUNC-LABEL: @test_small_memcpy_i64_lds_to_lds_align2
 ; SI: DS_READ_U16
-; SI: DS_WRITE_B16
 ; SI: DS_READ_U16
-; SI: DS_WRITE_B16
 ; SI: DS_READ_U16
-; SI: DS_WRITE_B16
 ; SI: DS_READ_U16
-; SI: DS_WRITE_B16
 ; SI: DS_READ_U16
-; SI: DS_WRITE_B16
 ; SI: DS_READ_U16
-; SI: DS_WRITE_B16
 ; SI: DS_READ_U16
-; SI: DS_WRITE_B16
 ; SI: DS_READ_U16
-; SI: DS_WRITE_B16
 
 ; SI: DS_READ_U16
 ; SI: DS_READ_U16
@@ -117,6 +113,15 @@ define void @test_small_memcpy_i64_lds_t
 ; SI: DS_WRITE_B16
 ; SI: DS_WRITE_B16
 
+; SI: DS_WRITE_B16
+; SI: DS_WRITE_B16
+; SI: DS_WRITE_B16
+; SI: DS_WRITE_B16
+; SI: DS_WRITE_B16
+; SI: DS_WRITE_B16
+; SI: DS_WRITE_B16
+; SI: DS_WRITE_B16
+
 ; SI: S_ENDPGM
 define void @test_small_memcpy_i64_lds_to_lds_align2(i64 addrspace(3)* noalias %out, i64 addrspace(3)* noalias %in) nounwind {
   %bcin = bitcast i64 addrspace(3)* %in to i8 addrspace(3)*
@@ -278,37 +283,37 @@ define void @test_small_memcpy_i64_globa
 
 ; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align2
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
-
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
-; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_LOAD_USHORT
+
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
+; SI-DAG: BUFFER_STORE_SHORT
 ; SI-DAG: BUFFER_STORE_SHORT
 
 ; SI: S_ENDPGM
@@ -321,9 +326,9 @@ define void @test_small_memcpy_i64_globa
 
 ; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align4
 ; SI: BUFFER_LOAD_DWORDX4
-; SI: BUFFER_STORE_DWORDX4
 ; SI: BUFFER_LOAD_DWORDX4
 ; SI: BUFFER_STORE_DWORDX4
+; SI: BUFFER_STORE_DWORDX4
 ; SI: S_ENDPGM
 define void @test_small_memcpy_i64_global_to_global_align4(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
   %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)*
@@ -334,9 +339,9 @@ define void @test_small_memcpy_i64_globa
 
 ; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align8
 ; SI: BUFFER_LOAD_DWORDX4
-; SI: BUFFER_STORE_DWORDX4
 ; SI: BUFFER_LOAD_DWORDX4
 ; SI: BUFFER_STORE_DWORDX4
+; SI: BUFFER_STORE_DWORDX4
 ; SI: S_ENDPGM
 define void @test_small_memcpy_i64_global_to_global_align8(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
   %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)*
@@ -347,9 +352,9 @@ define void @test_small_memcpy_i64_globa
 
 ; FUNC-LABEL: @test_small_memcpy_i64_global_to_global_align16
 ; SI: BUFFER_LOAD_DWORDX4
-; SI: BUFFER_STORE_DWORDX4
 ; SI: BUFFER_LOAD_DWORDX4
 ; SI: BUFFER_STORE_DWORDX4
+; SI: BUFFER_STORE_DWORDX4
 ; SI: S_ENDPGM
 define void @test_small_memcpy_i64_global_to_global_align16(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
   %bcin = bitcast i64 addrspace(1)* %in to i8 addrspace(1)*




