[llvm] r309172 - AMDGPU: Fix using SMRD instructions for argument loads in functions

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 26 13:39:42 PDT 2017


Author: arsenm
Date: Wed Jul 26 13:39:42 2017
New Revision: 309172

URL: http://llvm.org/viewvc/llvm-project?rev=309172&view=rev
Log:
AMDGPU: Fix using SMRD instructions for argument loads in functions

These argument values are not actually uniform except in kernels; in callable functions they may be divergent, so SMRD instructions must not be used to load through them.
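
For context, here is a minimal IR sketch of the difference being fixed. It is not taken from the patch or its tests; the function names are made up, and the exact instructions selected depend on the subtarget and on other legality checks.

target triple = "amdgcn--amdhsa"
; (addrspace(2) is the constant address space at this revision.)

; In a kernel, the pointer argument %p comes from the uniform kernarg
; segment, so dereferencing it may legitimately be selected as a scalar
; (SMRD) load such as s_load_dwordx2.
define amdgpu_kernel void @uniform_arg(i32 addrspace(1)* addrspace(2)* %p) {
  %q = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %p
  store i32 0, i32 addrspace(1)* %q
  ret void
}

; In a callable function, %p arrives in VGPRs (v[0:1]) and may differ per
; lane, so the same dereference has to use a VMEM access instead, e.g. the
; flat_load_dwordx2 the updated hsa-func.ll check expects.
define void @divergent_arg(i32 addrspace(1)* addrspace(2)* %p) {
  %q = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %p
  store i32 0, i32 addrspace(1)* %q
  ret void
}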

Modified:
    llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
    llvm/trunk/test/CodeGen/AMDGPU/hsa-func.ll

Modified: llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp?rev=309172&r1=309171&r2=309172&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp Wed Jul 26 13:39:42 2017
@@ -730,16 +730,44 @@ bool isInlinableLiteralV216(int32_t Lite
   return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
 }
 
+bool isArgPassedInSGPR(const Argument *A) {
+  const Function *F = A->getParent();
+
+  // Arguments to compute shaders are never a source of divergence.
+  CallingConv::ID CC = F->getCallingConv();
+  switch (CC) {
+  case CallingConv::AMDGPU_KERNEL:
+  case CallingConv::SPIR_KERNEL:
+    return true;
+  case CallingConv::AMDGPU_VS:
+  case CallingConv::AMDGPU_HS:
+  case CallingConv::AMDGPU_GS:
+  case CallingConv::AMDGPU_PS:
+  case CallingConv::AMDGPU_CS:
+    // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
+    // Everything else is in VGPRs.
+    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
+           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
+  default:
+    // TODO: Should calls support inreg for SGPR inputs?
+    return false;
+  }
+}
+
+// TODO: Should largely merge with AMDGPUTTIImpl::isSourceOfDivergence.
 bool isUniformMMO(const MachineMemOperand *MMO) {
   const Value *Ptr = MMO->getValue();
   // UndefValue means this is a load of a kernel input.  These are uniform.
   // Sometimes LDS instructions have constant pointers.
   // If Ptr is null, then that means this mem operand contains a
   // PseudoSourceValue like GOT.
-  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
+  if (!Ptr || isa<UndefValue>(Ptr) ||
       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
     return true;
 
+  if (const Argument *Arg = dyn_cast<Argument>(Ptr))
+    return isArgPassedInSGPR(Arg);
+
   const Instruction *I = dyn_cast<Instruction>(Ptr);
   return I && I->getMetadata("amdgpu.uniform");
 }
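
To make the default case in isArgPassedInSGPR above concrete: it deliberately returns false for the default calling convention even when the argument carries inreg (see the TODO), so a load through such an argument is no longer assumed uniform. A hedged IR sketch with a hypothetical function name:

define float @func_arg_not_sgpr(float addrspace(2)* inreg %p) {
  ; Despite the inreg annotation, isArgPassedInSGPR() returns false here
  ; because the calling convention is the default one, so this load is
  ; treated as potentially divergent and is not SMRD-eligible.
  %v = load float, float addrspace(2)* %p
  ret float %v
}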

Modified: llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h?rev=309172&r1=309171&r2=309172&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h Wed Jul 26 13:39:42 2017
@@ -23,6 +23,7 @@
 
 namespace llvm {
 
+class Argument;
 class FeatureBitset;
 class Function;
 class GlobalValue;
@@ -347,6 +348,7 @@ bool isInlinableLiteral16(int16_t Litera
 LLVM_READNONE
 bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi);
 
+bool isArgPassedInSGPR(const Argument *Arg);
 bool isUniformMMO(const MachineMemOperand *MMO);
 
 /// \returns The encoding that will be used for \p ByteOffset in the SMRD

Modified: llvm/trunk/test/CodeGen/AMDGPU/hsa-func.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/hsa-func.ll?rev=309172&r1=309171&r2=309172&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/hsa-func.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/hsa-func.ll Wed Jul 26 13:39:42 2017
@@ -1,7 +1,7 @@
 ; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=kaveri | FileCheck --check-prefix=HSA %s
-; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-flat-for-global | FileCheck --check-prefix=HSA-CI %s
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=kaveri | FileCheck --check-prefix=HSA-CI %s
 ; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=carrizo  | FileCheck --check-prefix=HSA %s
-; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=carrizo -mattr=-flat-for-global | FileCheck --check-prefix=HSA-VI %s
+; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=carrizo | FileCheck --check-prefix=HSA-VI %s
 ; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=kaveri -filetype=obj | llvm-readobj -symbols -s -sd | FileCheck --check-prefix=ELF %s
 ; RUN: llc < %s -mtriple=amdgcn--amdhsa -mcpu=kaveri | llvm-mc -filetype=obj -triple amdgcn--amdhsa -mcpu=kaveri | llvm-readobj -symbols -s -sd | FileCheck %s --check-prefix=ELF
 
@@ -27,7 +27,7 @@
 
 ; ELF: Symbol {
 ; ELF: Name: simple
-; ELF: Size: 48
+; ELF: Size: 36
 ; ELF: Type: Function (0x2)
 ; ELF: }
 
@@ -41,12 +41,9 @@
 ; HSA: .p2align 2
 ; HSA: {{^}}simple:
 ; HSA-NOT: amd_kernel_code_t
-; HSA-NOT: s_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[4:5], 0x0
+; HSA: flat_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[0:1]
 
 ; Make sure we are setting the ATC bit:
-; HSA-CI: s_mov_b32 s[[HI:[0-9]+]], 0x100f000
-; On VI+ we also need to set MTYPE = 2
-; HSA-VI: s_mov_b32 s[[HI:[0-9]+]], 0x1100f000
 ; Make sure we generate flat store for HSA
 ; HSA: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}
 



