[llvm] r346128 - [AMDGPU] Fix the new atomic optimizer in pixel shaders.

Neil Henning via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 5 04:04:48 PST 2018


Author: sheredom
Date: Mon Nov  5 04:04:48 2018
New Revision: 346128

URL: http://llvm.org/viewvc/llvm-project?rev=346128&view=rev
Log:
[AMDGPU] Fix the new atomic optimizer in pixel shaders.

The new atomic optimizer that I previously added in D51969 did not work
correctly in pixel shaders that use derivatives and therefore have
helper lanes active.

To fix this, we add an llvm.amdgcn.ps.live call that guards a branch
around the entire atomic operation, ensuring that all helper lanes
within the wavefront are inactive when we compute the atomic results.

I've added a test case that can trigger derivative computation and
exposes the problem.

Differential Revision: https://reviews.llvm.org/D53930
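
For reference, here is a minimal sketch of the IR shape the pass now
produces around an optimized atomic in a pixel shader. All function,
block, and value names are invented for illustration, and the
cross-lane reduction that the pass also emits inside the guarded block
is elided:

; Hypothetical names throughout; the real pass builds this with
; IRBuilder and SplitBlockAndInsertIfThen.
define amdgpu_ps float @guarded_atomic_sketch(<4 x i32> inreg %rsrc) {
entry:
  ; Only live (non-helper) lanes branch into the atomic block.
  %live = call i1 @llvm.amdgcn.ps.live()
  br i1 %live, label %atomic, label %exit

atomic:
  ; The cross-lane reduction and the single atomic run here, with
  ; every helper lane guaranteed inactive.
  %old = call i32 @llvm.amdgcn.buffer.atomic.add(i32 5, <4 x i32> %rsrc, i32 0, i32 0, i1 0)
  br label %exit

exit:
  ; Reconverge; helper lanes receive undef, which is safe because
  ; they must never observe the atomic's result.
  %result = phi i32 [ undef, %entry ], [ %old, %atomic ]
  %cast = bitcast i32 %result to float
  ret float %cast
}

declare i1 @llvm.amdgcn.ps.live()
declare i32 @llvm.amdgcn.buffer.atomic.add(i32, <4 x i32>, i32, i32, i1)

The undef incoming value on the helper-lane edge mirrors the
UndefValue::get(Ty) the pass inserts; helper lanes never consume the
atomic's result, so this is safe.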

Added:
    llvm/trunk/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp?rev=346128&r1=346127&r2=346128&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp Mon Nov  5 04:04:48 2018
@@ -53,6 +53,7 @@ private:
   const DataLayout *DL;
   DominatorTree *DT;
   bool HasDPP;
+  bool IsPixelShader;
 
   void optimizeAtomic(Instruction &I, Instruction::BinaryOps Op,
                       unsigned ValIdx, bool ValDivergent) const;
@@ -96,6 +97,7 @@ bool AMDGPUAtomicOptimizer::runOnFunctio
   const TargetMachine &TM = TPC.getTM<TargetMachine>();
   const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
   HasDPP = ST.hasDPP();
+  IsPixelShader = F.getCallingConv() == CallingConv::AMDGPU_PS;
 
   visit(F);
 
@@ -215,6 +217,31 @@ void AMDGPUAtomicOptimizer::optimizeAtom
   // Start building just before the instruction.
   IRBuilder<> B(&I);
 
+  // If we are in a pixel shader, because of how we have to mask out helper
+  // lane invocations, we need to record the entry and exit BBs.
+  BasicBlock *PixelEntryBB = nullptr;
+  BasicBlock *PixelExitBB = nullptr;
+
+  // If we're optimizing an atomic within a pixel shader, we need to wrap the
+  // entire atomic operation in a helper-lane check. We do not want any helper
+  // lanes that are around only for the purposes of derivatives to take part
+  // in any cross-lane communication, so we branch on whether the lane is
+  // live.
+  if (IsPixelShader) {
+    // Record I's original position as the entry block.
+    PixelEntryBB = I.getParent();
+
+    Value *const Cond = B.CreateIntrinsic(Intrinsic::amdgcn_ps_live, {}, {});
+    Instruction *const NonHelperTerminator =
+        SplitBlockAndInsertIfThen(Cond, &I, false, nullptr, DT, nullptr);
+
+    // Record I's new position as the exit block.
+    PixelExitBB = I.getParent();
+
+    I.moveBefore(NonHelperTerminator);
+    B.SetInsertPoint(&I);
+  }
+
   Type *const Ty = I.getType();
   const unsigned TyBitWidth = DL->getTypeSizeInBits(Ty);
   Type *const VecTy = VectorType::get(B.getInt32Ty(), 2);
@@ -398,8 +425,18 @@ void AMDGPUAtomicOptimizer::optimizeAtom
   // first lane, to get our lane's index into the atomic result.
   Value *const Result = B.CreateBinOp(Op, BroadcastI, LaneOffset);
 
-  // Replace the original atomic instruction with the new one.
-  I.replaceAllUsesWith(Result);
+  if (IsPixelShader) {
+    // Need a final PHI to reconverge above the helper lane branch mask.
+    B.SetInsertPoint(PixelExitBB->getFirstNonPHI());
+
+    PHINode *const PHI = B.CreatePHI(Ty, 2);
+    PHI->addIncoming(UndefValue::get(Ty), PixelEntryBB);
+    PHI->addIncoming(Result, I.getParent());
+    I.replaceAllUsesWith(PHI);
+  } else {
+    // Replace the original atomic instruction with the new one.
+    I.replaceAllUsesWith(Result);
+  }
 
   // And delete the original.
   I.eraseFromParent();

Added: llvm/trunk/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll?rev=346128&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/atomic_optimizations_pixelshader.ll Mon Nov  5 04:04:48 2018
@@ -0,0 +1,59 @@
+; RUN: llc -mtriple=amdgcn-- -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX7LESS %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX8MORE %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-atomic-optimizations=true -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX8MORE %s
+
+declare i1 @llvm.amdgcn.wqm.vote(i1)
+declare i32 @llvm.amdgcn.buffer.atomic.add(i32, <4 x i32>, i32, i32, i1)
+declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1)
+
+; Show what the atomic optimization pass will do for raw buffers.
+
+; GCN-LABEL: add_i32_constant:
+; GCN-LABEL: BB0_1:
+; GCN: s_mov_b64 s{{\[}}[[exec_lo:[0-9]+]]:[[exec_hi:[0-9]+]]{{\]}}, exec
+; GCN: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_lo:[0-9]+]], s[[exec_lo]], 0
+; GCN: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_hi:[0-9]+]], s[[exec_hi]], v[[mbcnt_lo]]
+; GCN: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc, 0, v[[mbcnt_hi]]
+; GCN: s_bcnt1_i32_b64 s[[popcount:[0-9]+]], s{{\[}}[[exec_lo]]:[[exec_hi]]{{\]}}
+; GCN: v_mul_u32_u24{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[popcount]], 5
+; GCN: buffer_atomic_add v[[value]]
+; GCN: v_readfirstlane_b32 s{{[0-9]+}}, v[[value]]
+define amdgpu_ps void @add_i32_constant(<4 x i32> inreg %out, <4 x i32> inreg %inout) {
+entry:
+  %cond1 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
+  %old = call i32 @llvm.amdgcn.buffer.atomic.add(i32 5, <4 x i32> %inout, i32 0, i32 0, i1 0)
+  %cond2 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
+  %cond = and i1 %cond1, %cond2
+  br i1 %cond, label %if, label %else
+if:
+  %bitcast = bitcast i32 %old to float
+  call void @llvm.amdgcn.buffer.store.f32(float %bitcast, <4 x i32> %out, i32 0, i32 0, i1 0, i1 0)
+  ret void
+else:
+  ret void
+}
+
+; GCN-LABEL: add_i32_varying:
+; GFX7LESS-NOT: v_mbcnt_lo_u32_b32
+; GFX7LESS-NOT: v_mbcnt_hi_u32_b32
+; GFX8MORE: v_mbcnt_lo_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_lo:[0-9]+]], exec_lo, 0
+; GFX8MORE: v_mbcnt_hi_u32_b32{{(_e[0-9]+)?}} v[[mbcnt_hi:[0-9]+]], exec_hi, v[[mbcnt_lo]]
+; GFX8MORE: v_readlane_b32 s[[scalar_value:[0-9]+]], v{{[0-9]+}}, 63
+; GFX8MORE: v_cmp_eq_u32{{(_e[0-9]+)?}} vcc, 0, v[[mbcnt_hi]]
+; GFX8MORE: v_mov_b32{{(_e[0-9]+)?}} v[[value:[0-9]+]], s[[scalar_value]]
+; GFX8MORE: buffer_atomic_add v[[value]]
+; GFX8MORE: v_readfirstlane_b32 s{{[0-9]+}}, v[[value]]
+define amdgpu_ps void @add_i32_varying(<4 x i32> inreg %out, <4 x i32> inreg %inout, i32 %val) {
+entry:
+  %cond1 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
+  %old = call i32 @llvm.amdgcn.buffer.atomic.add(i32 %val, <4 x i32> %inout, i32 0, i32 0, i1 0)
+  %cond2 = call i1 @llvm.amdgcn.wqm.vote(i1 true)
+  %cond = and i1 %cond1, %cond2
+  br i1 %cond, label %if, label %else
+if:
+  %bitcast = bitcast i32 %old to float
+  call void @llvm.amdgcn.buffer.store.f32(float %bitcast, <4 x i32> %out, i32 0, i32 0, i1 0, i1 0)
+  ret void
+else:
+  ret void
+}
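
As an aside, the GCN check lines in add_i32_constant above encode the
lane math of the optimized reduction. Below is a rough, hypothetical
IR-level rendering of what each lane computes (all names are invented,
the conditional lane-0-only atomic is elided, and the real pass emits
this via IRBuilder rather than as textual IR):

declare i64 @llvm.read_register.i64(metadata)
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
declare i32 @llvm.amdgcn.readfirstlane(i32)
declare i64 @llvm.ctpop.i64(i64)

; %old stands in for the value the single buffer_atomic_add returned
; on the first active lane.
define i32 @lane_math_sketch(i32 %old) {
entry:
  ; Read the exec mask to see which lanes are active.
  %exec = call i64 @llvm.read_register.i64(metadata !0)
  %exec.lo = trunc i64 %exec to i32
  %shift = lshr i64 %exec, 32
  %exec.hi = trunc i64 %shift to i32
  ; This lane's position among the active lanes (the v_mbcnt_lo/hi pair).
  %mbcnt.lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 %exec.lo, i32 0)
  %lane = call i32 @llvm.amdgcn.mbcnt.hi(i32 %exec.hi, i32 %mbcnt.lo)
  ; popcount(exec) * 5 is what the first lane feeds to the atomic
  ; (s_bcnt1_i32_b64 and v_mul_u32_u24 in the checks); it is unused
  ; here because the atomic itself is elided from this sketch.
  %count.wide = call i64 @llvm.ctpop.i64(i64 %exec)
  %count = trunc i64 %count.wide to i32
  %sum = mul i32 %count, 5
  ; Every lane recovers its own result from lane 0's atomic return
  ; value (v_readfirstlane_b32) plus 5 times its own lane index.
  %bcast = call i32 @llvm.amdgcn.readfirstlane(i32 %old)
  %offset = mul i32 %lane, 5
  %result = add i32 %bcast, %offset
  ret i32 %result
}

!0 = !{!"exec"}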
