[llvm] r305494 - DivergenceAnalysis: add TTI::isAlwaysUniform() for target operations with always-uniform results

Alexander Timofeev via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 15 12:33:10 PDT 2017


Author: alex-t
Date: Thu Jun 15 14:33:10 2017
New Revision: 305494

URL: http://llvm.org/viewvc/llvm-project?rev=305494&view=rev
Log:
DivergenceAnalysis: add TTI::isAlwaysUniform() for target operations with always-uniform results

Divergence is no longer propagated through values the target reports as
always uniform, such as the results of amdgcn.readfirstlane and
amdgcn.readlane. Also teach AMDGPUAnnotateUniformValues that, inside a
loop, a store placed after a load in the same block still clobbers the
load on the next iteration.

Added:
    llvm/trunk/test/CodeGen/AMDGPU/always-uniform.ll
Modified:
    llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h
    llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h
    llvm/trunk/include/llvm/CodeGen/BasicTTIImpl.h
    llvm/trunk/lib/Analysis/DivergenceAnalysis.cpp
    llvm/trunk/lib/Analysis/TargetTransformInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
    llvm/trunk/test/CodeGen/AMDGPU/global_smrd_cfg.ll

Modified: llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h (original)
+++ llvm/trunk/include/llvm/Analysis/TargetTransformInfo.h Thu Jun 15 14:33:10 2017
@@ -235,6 +235,11 @@ public:
   /// starting with the sources of divergence.
   bool isSourceOfDivergence(const Value *V) const;
 
+  /// \brief Returns true for the target-specific set
+  /// of operations that produce uniform results even
+  /// when taking non-uniform arguments.
+  bool isAlwaysUniform(const Value *V) const;
+
   /// Returns the address space ID for a target's 'flat' address space. Note
   /// this is not necessarily the same as addrspace(0), which LLVM sometimes
   /// refers to as the generic address space. The flat address space is a
@@ -821,6 +826,7 @@ public:
   virtual int getUserCost(const User *U) = 0;
   virtual bool hasBranchDivergence() = 0;
   virtual bool isSourceOfDivergence(const Value *V) = 0;
+  virtual bool isAlwaysUniform(const Value *V) = 0;
   virtual unsigned getFlatAddressSpace() = 0;
   virtual bool isLoweredToCall(const Function *F) = 0;
   virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
@@ -998,6 +1004,10 @@ public:
     return Impl.isSourceOfDivergence(V);
   }
 
+  bool isAlwaysUniform(const Value *V) override {
+    return Impl.isAlwaysUniform(V);
+  }
+
   unsigned getFlatAddressSpace() override {
     return Impl.getFlatAddressSpace();
   }

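For context on how the new hook is meant to be used: isAlwaysUniform() is the counterpart of isSourceOfDivergence(), in that the former pins a value as uniform regardless of its operands, while the latter pins it as divergent. Below is a minimal, hypothetical sketch of a client combining the two; the helper name and the operand callback are illustrative and not part of this patch.

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Hypothetical helper: classify a single instruction, given a callback
    // that reports whether an already-analyzed operand is divergent.
    static bool isInstructionDivergent(
        const Instruction &I, const TargetTransformInfo &TTI,
        function_ref<bool(const Value *)> IsOperandDivergent) {
      // Target says the result is uniform no matter what the operands are
      // (e.g. a lane broadcast such as amdgcn.readfirstlane).
      if (TTI.isAlwaysUniform(&I))
        return false;
      // Target says the result is divergent regardless of the operands
      // (e.g. a work-item id intrinsic).
      if (TTI.isSourceOfDivergence(&I))
        return true;
      // Otherwise divergence simply propagates from the operands.
      return any_of(I.operands(),
                    [&](const Use &U) { return IsOperandDivergent(U.get()); });
    }
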
Modified: llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h (original)
+++ llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h Thu Jun 15 14:33:10 2017
@@ -177,6 +177,8 @@ public:
 
   bool isSourceOfDivergence(const Value *V) { return false; }
 
+  bool isAlwaysUniform(const Value *V) { return false; }
+
   unsigned getFlatAddressSpace () {
     return -1;
   }

Modified: llvm/trunk/include/llvm/CodeGen/BasicTTIImpl.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/BasicTTIImpl.h?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/BasicTTIImpl.h (original)
+++ llvm/trunk/include/llvm/CodeGen/BasicTTIImpl.h Thu Jun 15 14:33:10 2017
@@ -93,6 +93,8 @@ public:
 
   bool isSourceOfDivergence(const Value *V) { return false; }
 
+  bool isAlwaysUniform(const Value *V) { return false; }
+
   unsigned getFlatAddressSpace() {
     // Return an invalid address space.
     return -1;

Modified: llvm/trunk/lib/Analysis/DivergenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/DivergenceAnalysis.cpp?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/DivergenceAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/DivergenceAnalysis.cpp Thu Jun 15 14:33:10 2017
@@ -241,7 +241,7 @@ void DivergencePropagator::exploreDataDe
   // Follow def-use chains of V.
   for (User *U : V->users()) {
     Instruction *UserInst = cast<Instruction>(U);
-    if (DV.insert(UserInst).second)
+    if (!TTI.isAlwaysUniform(U) && DV.insert(UserInst).second)
       Worklist.push_back(UserInst);
   }
 }

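Downstream, this hunk matters because consumers query DivergenceAnalysis per value: with readfirstlane users kept out of the divergent set, addresses derived from its result are reported uniform. A small, hypothetical consumer as a sketch; the function itself is illustrative, only DivergenceAnalysis::isUniform() is existing API.

    #include "llvm/Analysis/DivergenceAnalysis.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Hypothetical consumer: after this patch, a load whose address is
    // computed from llvm.amdgcn.readfirstlane is reported uniform here,
    // even though the readfirstlane argument itself is divergent.
    static bool hasUniformAddress(const LoadInst &LI,
                                  const DivergenceAnalysis &DA) {
      return DA.isUniform(LI.getPointerOperand());
    }
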
Modified: llvm/trunk/lib/Analysis/TargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/TargetTransformInfo.cpp?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/TargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Analysis/TargetTransformInfo.cpp Thu Jun 15 14:33:10 2017
@@ -103,6 +103,10 @@ bool TargetTransformInfo::isSourceOfDive
   return TTIImpl->isSourceOfDivergence(V);
 }
 
+bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
+  return TTIImpl->isAlwaysUniform(V);
+}
+
 unsigned TargetTransformInfo::getFlatAddressSpace() const {
   return TTIImpl->getFlatAddressSpace();
 }

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp Thu Jun 15 14:33:10 2017
@@ -107,7 +107,7 @@ bool AMDGPUAnnotateUniformValues::isClob
 
   DFS(Start, Checklist);
   for (auto &BB : Checklist) {
-    BasicBlock::iterator StartIt = (BB == Load->getParent()) ?
+    BasicBlock::iterator StartIt = (!L && (BB == Load->getParent())) ?
      BasicBlock::iterator(Load) : BB->end();
      if (MDR->getPointerDependencyFrom(MemoryLocation(Ptr),
        true, StartIt, BB, Load).isClobber())

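The one-line change above is subtle, so here is a sketch of the clobber scan with the intent spelled out in comments. It mirrors the existing isClobberedInFunction() loop rather than adding anything new; Checklist, L, MDR and Ptr keep the meanings they have in the pass.

    // Scan each block that can reach the load (Checklist) for a clobber.
    for (BasicBlock *BB : Checklist) {
      // Outside of any loop (L == nullptr), only instructions before the
      // load in its own block can clobber it, so start scanning at the
      // load. Inside a loop, a store placed after the load in the same
      // block still clobbers it on the next iteration, so scan the whole
      // block by starting from BB->end().
      BasicBlock::iterator StartIt = (!L && BB == Load->getParent())
                                         ? BasicBlock::iterator(Load)
                                         : BB->end();
      if (MDR->getPointerDependencyFrom(MemoryLocation(Ptr), /*isLoad=*/true,
                                        StartIt, BB, Load)
              .isClobber())
        return true;
    }
    return false;
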
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp Thu Jun 15 14:33:10 2017
@@ -489,6 +489,19 @@ bool AMDGPUTTIImpl::isSourceOfDivergence
   return false;
 }
 
+bool AMDGPUTTIImpl::isAlwaysUniform(const Value *V) const {
+  if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(V)) {
+    switch (Intrinsic->getIntrinsicID()) {
+    default:
+      return false;
+    case Intrinsic::amdgcn_readfirstlane:
+    case Intrinsic::amdgcn_readlane:
+      return true;
+    }
+  }
+  return false;
+}
+
 unsigned AMDGPUTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                        Type *SubTp) {
   if (ST->hasVOP3PInsts()) {

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h Thu Jun 15 14:33:10 2017
@@ -103,6 +103,7 @@ public:
 
   int getVectorInstrCost(unsigned Opcode, Type *ValTy, unsigned Index);
   bool isSourceOfDivergence(const Value *V) const;
+  bool isAlwaysUniform(const Value *V) const;
 
   unsigned getFlatAddressSpace() const {
     // Don't bother running InferAddressSpaces pass on graphics shaders which

Added: llvm/trunk/test/CodeGen/AMDGPU/always-uniform.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/always-uniform.ll?rev=305494&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/always-uniform.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/always-uniform.ll Thu Jun 15 14:33:10 2017
@@ -0,0 +1,21 @@
+; RUN: llc -mtriple amdgcn-amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare i32 @llvm.amdgcn.readfirstlane(i32)
+
+; GCN-LABEL: readfirstlane_uniform
+; GCN: s_load_dwordx2 s{{\[}}[[IN_ADDR:[0-9]+]]:1{{\]}}, s[4:5], 0x0
+; GCN: v_readfirstlane_b32 s[[SCALAR:[0-9]+]], v0
+; GCN: s_add_u32 s[[LOAD_ADDR:[0-9]+]], s[[IN_ADDR]], s[[SCALAR]]
+; GCN: s_load_dword s{{[0-9]+}}, s{{\[}}[[LOAD_ADDR]]
+
+define amdgpu_kernel void @readfirstlane_uniform(float addrspace(1)* noalias nocapture readonly, float addrspace(1)* noalias nocapture readonly) {
+  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %scalar = tail call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
+  %idx = zext i32 %scalar to i64
+  %gep0 = getelementptr inbounds float, float addrspace(1)* %0, i64 %idx
+  %val = load float, float addrspace(1)* %gep0, align 4
+  %gep1 = getelementptr inbounds float, float addrspace(1)* %1, i64 10
+  store float %val, float addrspace(1)* %gep1, align 4
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/AMDGPU/global_smrd_cfg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/global_smrd_cfg.ll?rev=305494&r1=305493&r2=305494&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/global_smrd_cfg.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/global_smrd_cfg.ll Thu Jun 15 14:33:10 2017
@@ -72,6 +72,39 @@ bb22:
   br i1 %tmp31, label %bb7, label %bb11
 }
 
+; One more test to ensure that an aliasing store after the load
+; is considered clobbering when the load's parent block is the
+; same as the loop header block.
+
+; CHECK-LABEL: %bb1
+
+; The load from %arg has an aliasing store after it, but the store
+; is still considered clobbering because of the loop back edge.
+
+; CHECK: flat_load_dword
+
+define amdgpu_kernel void @cfg_selfloop(i32 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) #0 {
+bb:
+  br label %bb1
+
+bb2:
+  ret void
+
+bb1:
+  %tmp13 = phi i32 [ %tmp25, %bb1 ], [ 0, %bb ]
+  %tmp14 = srem i32 %tmp13, %arg2
+  %tmp15 = sext i32 %tmp14 to i64
+  %tmp16 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp15
+  %tmp17 = load i32, i32 addrspace(1)* %tmp16, align 4, !tbaa !0
+  %tmp19 = sext i32 %tmp13 to i64
+  %tmp21 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp19
+  store i32 %tmp17, i32 addrspace(1)* %tmp21, align 4, !tbaa !0
+  %tmp25 = add nuw nsw i32 %tmp13, 1
+  %tmp31 = icmp eq i32 %tmp25, 100
+  br i1 %tmp31, label %bb2, label %bb1
+}
+
+
 attributes #0 = { "target-cpu"="fiji" }
 
 !0 = !{!1, !1, i64 0}
