[llvm] r304554 - AMDGPUAnnotateUniformValue should always treat volatile loads as divergent

Alexander Timofeev via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 2 08:25:52 PDT 2017


Author: alex-t
Date: Fri Jun  2 10:25:52 2017
New Revision: 304554

URL: http://llvm.org/viewvc/llvm-project?rev=304554&view=rev
Log:
AMDGPUAnnotateUniformValue should always treat volatile loads as divergent

Even when its address is uniform, a volatile load must not be selected to a
scalar (s_load) instruction; it has to stay on the vector memory path, which
the new test checks.

Added:
    llvm/trunk/test/CodeGen/AMDGPU/not-scalarize-volatile-load.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/SMInstructions.td

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=304554&r1=304553&r2=304554&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Fri Jun  2 10:25:52 2017
@@ -3571,7 +3571,7 @@ SDValue SITargetLowering::LowerLOAD(SDVa
   }
   if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) {
     if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
-                  isMemOpHasNoClobberedMemOperand(Load))
+        !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load))
       return SDValue();
     // Non-uniform loads will be selected to MUBUF instructions, so they
     // have the same legalization requirements as global and private
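
For context: returning SDValue() from LowerLOAD here leaves a uniform
constant/global load to be selected as a scalar load, so the added
!Load->isVolatile() clause is what pushes volatile loads off that path. A
minimal sketch of the resulting guard, with names taken from the diff (an
illustration, not a drop-in excerpt from the file):

    // A load may stay on the scalar selection path only if the subtarget
    // scalarizes global loads, the address is uniform across the wavefront,
    // the load (new with this patch) is not volatile, and nothing clobbers
    // its memory operand.
    bool KeepScalar = Subtarget->getScalarizeGlobalBehavior() &&
                      isMemOpUniform(Load) &&
                      !Load->isVolatile() &&
                      isMemOpHasNoClobberedMemOperand(Load);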

Modified: llvm/trunk/lib/Target/AMDGPU/SMInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SMInstructions.td?rev=304554&r1=304553&r2=304554&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SMInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SMInstructions.td Fri Jun  2 10:25:52 2017
@@ -229,6 +229,7 @@ def smrd_load : PatFrag <(ops node:$ptr)
     ((Ld->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS &&
     static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N)) ||
     (Subtarget->getScalarizeGlobalBehavior() && Ld->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS &&
+    !Ld->isVolatile() &&
     static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N) &&
     static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)));
 }]>;
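
The smrd_load PatFrag gates which loads may select to scalar-memory (s_load_*)
instructions, so the added !Ld->isVolatile() term makes volatile global loads
fall through to the vector FLAT/MUBUF patterns instead. For contrast with the
test added below, a non-volatile companion kernel should still take the scalar
path; this sketch and the expected selection are my assumption based on the
pattern above, not part of this commit:

    ; Same shape as the new test, minus `volatile` (hypothetical companion).
    ; Under -amdgpu-scalarize-global-loads this should still match smrd_load
    ; and select s_load_dword rather than flat_load_dword (assumption).
    define amdgpu_kernel void @uniform_load(i32 addrspace(1)* %arg, i32 addrspace(1)* nocapture %arg1) {
    bb:
      %val = load i32, i32 addrspace(1)* %arg, align 4
      %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 5
      store i32 %val, i32 addrspace(1)* %gep, align 4
      ret void
    }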

Added: llvm/trunk/test/CodeGen/AMDGPU/not-scalarize-volatile-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/not-scalarize-volatile-load.ll?rev=304554&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/not-scalarize-volatile-load.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/not-scalarize-volatile-load.ll Fri Jun  2 10:25:52 2017
@@ -0,0 +1,15 @@
+; RUN: llc -mtriple amdgcn--amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: @volatile_load
+; GCN:  s_load_dwordx2 s{{\[}}[[LO_SREG:[0-9]+]]:[[HI_SREG:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0
+; GCN:  v_mov_b32_e32 v[[LO_VREG:[0-9]+]], s[[LO_SREG]]
+; GCN:  v_mov_b32_e32 v[[HI_VREG:[0-9]+]], s[[HI_SREG]]
+; GCN:  flat_load_dword v{{[0-9]+}}, v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
+
+define amdgpu_kernel void @volatile_load(i32 addrspace(1)* %arg, i32 addrspace(1)* nocapture %arg1) {
+bb:
+  %tmp18 = load volatile i32, i32 addrspace(1)* %arg, align 4
+  %tmp26 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 5
+  store i32 %tmp18, i32 addrspace(1)* %tmp26, align 4
+  ret void
+}
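
The CHECK lines pin down the interesting part: the kernel arguments still
arrive via s_load_dwordx2, but the volatile load of %arg itself goes through a
VGPR pair to flat_load_dword rather than to a scalar s_load. To run just this
test locally, the usual llvm-lit invocation works (the build-directory layout
below is an assumption; adjust for your tree):

    # From the build directory; bin/llvm-lit is generated by a default build.
    ./bin/llvm-lit -v ../llvm/test/CodeGen/AMDGPU/not-scalarize-volatile-load.ll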