[llvm] r344698 - AMDGPU: Avoid selecting ds_{read,write}2_b32 on SI

Nicolai Haehnle via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 17 08:37:49 PDT 2018


Author: nha
Date: Wed Oct 17 08:37:48 2018
New Revision: 344698

URL: http://llvm.org/viewvc/llvm-project?rev=344698&view=rev
Log:
AMDGPU: Avoid selecting ds_{read,write}2_b32 on SI

Summary:
To workaround a hardware issue in the (base + offset) calculation
when base is negative. The impact on code quality should be limited
since SILoadStoreOptimizer still runs afterwards and is able to
combine loads/stores based on known sign information.

This fixes visible corruption in Hitman on SI (easily reproducible
by running benchmark mode).

Change-Id: Ia178d207a5e2ac38ae7cd98b532ea2ae74704e5f
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=99923

Reviewers: arsenm, mareko

Subscribers: jholewinski, kzhuravl, jvesely, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits

Differential Revision: https://reviews.llvm.org/D53160

Added:
    llvm/trunk/test/CodeGen/AMDGPU/lds-bounds.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
    llvm/trunk/lib/Target/AMDGPU/DSInstructions.td
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp?rev=344698&r1=344697&r2=344698&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp Wed Oct 17 08:37:48 2018
@@ -978,8 +978,6 @@ bool AMDGPUDAGToDAGISel::SelectDS64Bit4B
 
   // default case
 
-  // FIXME: This is broken on SI where we still need to check if the base
-  // pointer is positive here.
   Base = Addr;
   Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
   Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);

Modified: llvm/trunk/lib/Target/AMDGPU/DSInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/DSInstructions.td?rev=344698&r1=344697&r2=344698&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/DSInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/DSInstructions.td Wed Oct 17 08:37:48 2018
@@ -728,7 +728,9 @@ class DS64Bit4ByteAlignedWritePat<DS_Pse
               (i1 0))
 >;
 
-let OtherPredicates = [LDSRequiresM0Init] in {
+// v2i32 loads are split into i32 loads on SI during lowering, due to a bug
+// related to bounds checking.
+let OtherPredicates = [LDSRequiresM0Init, isCIVI] in {
 def : DS64Bit4ByteAlignedReadPat<DS_READ2_B32, load_local_m0>;
 def : DS64Bit4ByteAlignedWritePat<DS_WRITE2_B32, store_local_m0>;
 }

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=344698&r1=344697&r2=344698&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Wed Oct 17 08:37:48 2018
@@ -6292,6 +6292,17 @@ SDValue SITargetLowering::LowerLOAD(SDVa
 
     if (NumElements > 2)
       return SplitVectorLoad(Op, DAG);
+
 +    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
+    // address is negative, then the instruction is incorrectly treated as
+    // out-of-bounds even if base + offsets is in bounds. Split vectorized
+    // loads here to avoid emitting ds_read2_b32. We may re-combine the
+    // load later in the SILoadStoreOptimizer.
+    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
+        NumElements == 2 && MemVT.getStoreSize() == 8 &&
+        Load->getAlignment() < 8) {
+      return SplitVectorLoad(Op, DAG);
+    }
   }
   return SDValue();
 }
@@ -6694,6 +6705,18 @@ SDValue SITargetLowering::LowerSTORE(SDV
 
     if (NumElements > 2)
       return SplitVectorStore(Op, DAG);
+
 +    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
+    // address is negative, then the instruction is incorrectly treated as
+    // out-of-bounds even if base + offsets is in bounds. Split vectorized
+    // stores here to avoid emitting ds_write2_b32. We may re-combine the
+    // store later in the SILoadStoreOptimizer.
+    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
+        NumElements == 2 && VT.getStoreSize() == 8 &&
+        Store->getAlignment() < 8) {
+      return SplitVectorStore(Op, DAG);
+    }
+
     return SDValue();
   } else {
     llvm_unreachable("unhandled address space");

Added: llvm/trunk/test/CodeGen/AMDGPU/lds-bounds.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/lds-bounds.ll?rev=344698&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/lds-bounds.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/lds-bounds.ll Wed Oct 17 08:37:48 2018
@@ -0,0 +1,129 @@
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,NOSI %s
+
+ at compute_lds = external addrspace(3) global [512 x i32], align 16
+
+; GCN-LABEL: {{^}}store_aligned:
+; GCN: ds_write_b64
+define amdgpu_cs void @store_aligned(i32 addrspace(3)* %ptr) #0 {
+entry:
+  %ptr.gep.1 = getelementptr i32, i32 addrspace(3)* %ptr, i32 1
+
+  store i32 42, i32 addrspace(3)* %ptr, align 8
+  store i32 43, i32 addrspace(3)* %ptr.gep.1
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}load_aligned:
+; GCN: ds_read_b64
+define amdgpu_cs <2 x float> @load_aligned(i32 addrspace(3)* %ptr) #0 {
+entry:
+  %ptr.gep.1 = getelementptr i32, i32 addrspace(3)* %ptr, i32 1
+
+  %v.0 = load i32, i32 addrspace(3)* %ptr, align 8
+  %v.1 = load i32, i32 addrspace(3)* %ptr.gep.1
+
+  %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_const_idx:
+; GCN: ds_write2_b32
+define amdgpu_cs void @store_global_const_idx() #0 {
+entry:
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 3
+  %ptr.b = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 4
+
+  store i32 42, i32 addrspace(3)* %ptr.a
+  store i32 43, i32 addrspace(3)* %ptr.b
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_const_idx:
+; GCN: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_const_idx() #0 {
+entry:
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 3
+  %ptr.b = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 4
+
+  %v.0 = load i32, i32 addrspace(3)* %ptr.a
+  %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+  %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_var_idx_case1:
+; SI: ds_write_b32
+; SI: ds_write_b32
+; NOSI: ds_write2_b32
+define amdgpu_cs void @store_global_var_idx_case1(i32 %idx) #0 {
+entry:
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx
+  %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+  store i32 42, i32 addrspace(3)* %ptr.a
+  store i32 43, i32 addrspace(3)* %ptr.b
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_var_idx_case1:
+; SI: ds_read_b32
+; SI: ds_read_b32
+; NOSI: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_var_idx_case1(i32 %idx) #0 {
+entry:
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx
+  %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+  %v.0 = load i32, i32 addrspace(3)* %ptr.a
+  %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+  %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_var_idx_case2:
+; GCN: ds_write2_b32
+define amdgpu_cs void @store_global_var_idx_case2(i32 %idx) #0 {
+entry:
+  %idx.and = and i32 %idx, 255
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx.and
+  %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+  store i32 42, i32 addrspace(3)* %ptr.a
+  store i32 43, i32 addrspace(3)* %ptr.b
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_var_idx_case2:
+; GCN: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_var_idx_case2(i32 %idx) #0 {
+entry:
+  %idx.and = and i32 %idx, 255
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx.and
+  %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+  %v.0 = load i32, i32 addrspace(3)* %ptr.a
+  %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+  %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+attributes #0 = { nounwind }




More information about the llvm-commits mailing list