[llvm] f90849d - [AMDGPU] Use UniformityAnalysis in AtomicOptimizer
Author: pvanhout
Date: 2023-03-15T09:39:55+01:00
New Revision: f90849dfa3edc77f5e3088787e6e11ec201a7ea9
URL: https://github.com/llvm/llvm-project/commit/f90849dfa3edc77f5e3088787e6e11ec201a7ea9
DIFF: https://github.com/llvm/llvm-project/commit/f90849dfa3edc77f5e3088787e6e11ec201a7ea9.diff
LOG: [AMDGPU] Use UniformityAnalysis in AtomicOptimizer
Adds & uses a new `isDivergentUse` API in UniformityAnalysis.
UniformityAnalysis now requires CycleInfo as well, so that the new temporal divergence API can query it.
-----
The original patch that adds `isDivergentUse` is by @sameerds:
The user of a temporally divergent value is marked as divergent in the
uniformity analysis. But the same user may also have been marked divergent
for other reasons, which loses the information that the divergence is
temporal. Some clients need to specifically check for temporal divergence.
This change restores such an API, which already existed in
DivergenceAnalysis.
Reviewed By: sameerds, foad
Differential Revision: https://reviews.llvm.org/D146018
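
[Editor's note] As a rough illustration, not part of this patch, here is a
minimal sketch of how an IR-level client holding a computed UniformityInfo
might query the new API per operand. The helper name `reportDivergentUses`
and the arguments `F` and `UA` are hypothetical; only `isDivergentUse` and
the headers come from LLVM itself.

// Sketch only: walk every use in F and report the divergent ones. A use
// can be divergent even when the used value is uniform at its definition,
// e.g. when the def sits in a cycle with divergent exits that does not
// contain the user (temporal divergence).
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static void reportDivergentUses(Function &F, const UniformityInfo &UA) {
  for (Instruction &I : instructions(F)) {
    for (Use &U : I.operands()) {
      if (UA.isDivergentUse(U))
        errs() << "divergent use of " << *U.get() << " in: " << I << "\n";
    }
  }
}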
Added:
Modified:
llvm/include/llvm/ADT/GenericSSAContext.h
llvm/include/llvm/ADT/GenericUniformityImpl.h
llvm/include/llvm/ADT/GenericUniformityInfo.h
llvm/include/llvm/CodeGen/MachineSSAContext.h
llvm/include/llvm/IR/SSAContext.h
llvm/lib/Analysis/UniformityAnalysis.cpp
llvm/lib/CodeGen/MachineUniformityAnalysis.cpp
llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
llvm/test/CodeGen/AMDGPU/divergence-at-use.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/ADT/GenericSSAContext.h b/llvm/include/llvm/ADT/GenericSSAContext.h
index 7c02060636e96..40b2be7c34c31 100644
--- a/llvm/include/llvm/ADT/GenericSSAContext.h
+++ b/llvm/include/llvm/ADT/GenericSSAContext.h
@@ -53,6 +53,11 @@ template <typename _FunctionT> class GenericSSAContext {
// indicated by the compiler.
using FunctionT = typename _FunctionT::invalidTemplateInstanceError;
+ // A UseT represents a data-edge from the defining instruction to the using
+ // instruction.
+ //
+ // using UseT = ...
+
// Initialize the SSA context with information about the FunctionT being
// processed.
//
diff --git a/llvm/include/llvm/ADT/GenericUniformityImpl.h b/llvm/include/llvm/ADT/GenericUniformityImpl.h
index 66a1880dbd7b9..7eff61b26ba56 100644
--- a/llvm/include/llvm/ADT/GenericUniformityImpl.h
+++ b/llvm/include/llvm/ADT/GenericUniformityImpl.h
@@ -330,6 +330,7 @@ template <typename ContextT> class GenericUniformityAnalysisImpl {
using FunctionT = typename ContextT::FunctionT;
using ValueRefT = typename ContextT::ValueRefT;
using ConstValueRefT = typename ContextT::ConstValueRefT;
+ using UseT = typename ContextT::UseT;
using InstructionT = typename ContextT::InstructionT;
using DominatorTreeT = typename ContextT::DominatorTreeT;
@@ -384,6 +385,8 @@ template <typename ContextT> class GenericUniformityAnalysisImpl {
/// \brief Whether \p Val is divergent at its definition.
bool isDivergent(ConstValueRefT V) const { return DivergentValues.count(V); }
+ bool isDivergentUse(const UseT &U) const;
+
bool hasDivergentTerminator(const BlockT &B) const {
return DivergentTermBlocks.contains(&B);
}
@@ -462,9 +465,9 @@ template <typename ContextT> class GenericUniformityAnalysisImpl {
bool usesValueFromCycle(const InstructionT &I, const CycleT &DefCycle) const;
- /// \brief Whether \p Val is divergent when read in \p ObservingBlock.
+ /// \brief Whether \p Def is divergent when read in \p ObservingBlock.
bool isTemporalDivergent(const BlockT &ObservingBlock,
- ConstValueRefT Val) const;
+ const InstructionT &Def) const;
};
template <typename ImplT>
@@ -1091,6 +1094,20 @@ getOutermostDivergentCycle(const CycleT *Cycle, const BlockT *DivTermBlock,
return Ext;
}
+template <typename ContextT>
+bool GenericUniformityAnalysisImpl<ContextT>::isTemporalDivergent(
+ const BlockT &ObservingBlock, const InstructionT &Def) const {
+ const BlockT *DefBlock = Def.getParent();
+ for (const CycleT *Cycle = CI.getCycle(DefBlock);
+ Cycle && !Cycle->contains(&ObservingBlock);
+ Cycle = Cycle->getParentCycle()) {
+ if (DivergentExitCycles.contains(Cycle)) {
+ return true;
+ }
+ }
+ return false;
+}
+
template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::analyzeControlDivergence(
const InstructionT &Term) {
@@ -1273,6 +1290,11 @@ bool GenericUniformityInfo<ContextT>::isDivergent(const InstructionT *I) const {
return DA->isDivergent(*I);
}
+template <typename ContextT>
+bool GenericUniformityInfo<ContextT>::isDivergentUse(const UseT &U) const {
+ return DA->isDivergentUse(U);
+}
+
template <typename ContextT>
bool GenericUniformityInfo<ContextT>::hasDivergentTerminator(const BlockT &B) {
return DA->hasDivergentTerminator(B);
diff --git a/llvm/include/llvm/ADT/GenericUniformityInfo.h b/llvm/include/llvm/ADT/GenericUniformityInfo.h
index 987239d2c2b08..add5f3c68225f 100644
--- a/llvm/include/llvm/ADT/GenericUniformityInfo.h
+++ b/llvm/include/llvm/ADT/GenericUniformityInfo.h
@@ -36,6 +36,7 @@ template <typename ContextT> class GenericUniformityInfo {
using FunctionT = typename ContextT::FunctionT;
using ValueRefT = typename ContextT::ValueRefT;
using ConstValueRefT = typename ContextT::ConstValueRefT;
+ using UseT = typename ContextT::UseT;
using InstructionT = typename ContextT::InstructionT;
using DominatorTreeT = typename ContextT::DominatorTreeT;
using ThisT = GenericUniformityInfo<ContextT>;
@@ -69,6 +70,10 @@ template <typename ContextT> class GenericUniformityInfo {
bool isUniform(const InstructionT *I) const { return !isDivergent(I); };
bool isDivergent(const InstructionT *I) const;
+ /// \brief Whether \p U is divergent. Uses of a uniform value can be
+ /// divergent.
+ bool isDivergentUse(const UseT &U) const;
+
bool hasDivergentTerminator(const BlockT &B);
void print(raw_ostream &Out) const;
diff --git a/llvm/include/llvm/CodeGen/MachineSSAContext.h b/llvm/include/llvm/CodeGen/MachineSSAContext.h
index 8bf963da9748f..a3788f1046ec9 100644
--- a/llvm/include/llvm/CodeGen/MachineSSAContext.h
+++ b/llvm/include/llvm/CodeGen/MachineSSAContext.h
@@ -45,6 +45,7 @@ template <> class GenericSSAContext<MachineFunction> {
using ValueRefT = Register;
using ConstValueRefT = Register;
static const Register ValueRefNull;
+ using UseT = MachineOperand;
using DominatorTreeT = DominatorTreeBase<BlockT, false>;
void setFunction(MachineFunction &Fn);
diff --git a/llvm/include/llvm/IR/SSAContext.h b/llvm/include/llvm/IR/SSAContext.h
index 346c1fc67574c..5180c92d65565 100644
--- a/llvm/include/llvm/IR/SSAContext.h
+++ b/llvm/include/llvm/IR/SSAContext.h
@@ -44,6 +44,7 @@ template <> class GenericSSAContext<Function> {
using ValueRefT = Value *;
using ConstValueRefT = const Value *;
static Value *ValueRefNull;
+ using UseT = Use;
using DominatorTreeT = DominatorTreeBase<BlockT, false>;
void setFunction(Function &Fn);
diff --git a/llvm/lib/Analysis/UniformityAnalysis.cpp b/llvm/lib/Analysis/UniformityAnalysis.cpp
index 7dfa628d7660d..13a9c2b7e4438 100644
--- a/llvm/lib/Analysis/UniformityAnalysis.cpp
+++ b/llvm/lib/Analysis/UniformityAnalysis.cpp
@@ -78,6 +78,19 @@ bool llvm::GenericUniformityAnalysisImpl<SSAContext>::usesValueFromCycle(
return false;
}
+template <>
+bool llvm::GenericUniformityAnalysisImpl<SSAContext>::isDivergentUse(
+ const Use &U) const {
+ const auto *V = U.get();
+ if (isDivergent(V))
+ return true;
+ if (const auto *DefInstr = dyn_cast<Instruction>(V)) {
+ const auto *UseInstr = cast<Instruction>(U.getUser());
+ return isTemporalDivergent(*UseInstr->getParent(), *DefInstr);
+ }
+ return false;
+}
+
// This ensures explicit instantiation of
// GenericUniformityAnalysisImpl::ImplDeleter::operator()
template class llvm::GenericUniformityInfo<SSAContext>;
@@ -122,6 +135,7 @@ UniformityInfoWrapperPass::UniformityInfoWrapperPass() : FunctionPass(ID) {
INITIALIZE_PASS_BEGIN(UniformityInfoWrapperPass, "uniformity",
"Uniformity Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(CycleInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(UniformityInfoWrapperPass, "uniformity",
"Uniformity Analysis", true, true)
@@ -129,7 +143,7 @@ INITIALIZE_PASS_END(UniformityInfoWrapperPass, "uniformity",
void UniformityInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<DominatorTreeWrapperPass>();
- AU.addRequired<CycleInfoWrapperPass>();
+ AU.addRequiredTransitive<CycleInfoWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
}
diff --git a/llvm/lib/CodeGen/MachineUniformityAnalysis.cpp b/llvm/lib/CodeGen/MachineUniformityAnalysis.cpp
index 0c44cd4336507..ef67bae1c1af0 100644
--- a/llvm/lib/CodeGen/MachineUniformityAnalysis.cpp
+++ b/llvm/lib/CodeGen/MachineUniformityAnalysis.cpp
@@ -113,6 +113,26 @@ bool llvm::GenericUniformityAnalysisImpl<MachineSSAContext>::usesValueFromCycle(
return false;
}
+template <>
+bool llvm::GenericUniformityAnalysisImpl<MachineSSAContext>::isDivergentUse(
+ const MachineOperand &U) const {
+ if (!U.isReg())
+ return false;
+
+ auto Reg = U.getReg();
+ if (isDivergent(Reg))
+ return true;
+
+ const auto &RegInfo = F.getRegInfo();
+ auto *Def = RegInfo.getOneDef(Reg);
+ if (!Def)
+ return true;
+
+ auto *DefInstr = Def->getParent();
+ auto *UseInstr = U.getParent();
+ return isTemporalDivergent(*UseInstr->getParent(), *DefInstr);
+}
+
// This ensures explicit instantiation of
// GenericUniformityAnalysisImpl::ImplDeleter::operator()
template class llvm::GenericUniformityInfo<MachineSSAContext>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index 28967bb8e5b1c..e4ecfc710d6d8 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -15,7 +15,7 @@
#include "AMDGPU.h"
#include "GCNSubtarget.h"
-#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
+#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
@@ -42,7 +42,7 @@ class AMDGPUAtomicOptimizer : public FunctionPass,
public InstVisitor<AMDGPUAtomicOptimizer> {
private:
SmallVector<ReplacementInfo, 8> ToReplace;
- const LegacyDivergenceAnalysis *DA;
+ const UniformityInfo *UA;
const DataLayout *DL;
DominatorTree *DT;
const GCNSubtarget *ST;
@@ -65,7 +65,7 @@ class AMDGPUAtomicOptimizer : public FunctionPass,
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addPreserved<DominatorTreeWrapperPass>();
- AU.addRequired<LegacyDivergenceAnalysis>();
+ AU.addRequired<UniformityInfoWrapperPass>();
AU.addRequired<TargetPassConfig>();
}
@@ -84,7 +84,7 @@ bool AMDGPUAtomicOptimizer::runOnFunction(Function &F) {
return false;
}
- DA = &getAnalysis<LegacyDivergenceAnalysis>();
+ UA = &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
DL = &F.getParent()->getDataLayout();
DominatorTreeWrapperPass *const DTW =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
@@ -139,11 +139,11 @@ void AMDGPUAtomicOptimizer::visitAtomicRMWInst(AtomicRMWInst &I) {
// If the pointer operand is divergent, then each lane is doing an atomic
// operation on a different address, and we cannot optimize that.
- if (DA->isDivergentUse(&I.getOperandUse(PtrIdx))) {
+ if (UA->isDivergentUse(I.getOperandUse(PtrIdx))) {
return;
}
- const bool ValDivergent = DA->isDivergentUse(&I.getOperandUse(ValIdx));
+ const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));
// If the value operand is divergent, each lane is contributing a different
// value to the atomic calculation. We can only optimize divergent values if
@@ -217,7 +217,7 @@ void AMDGPUAtomicOptimizer::visitIntrinsicInst(IntrinsicInst &I) {
const unsigned ValIdx = 0;
- const bool ValDivergent = DA->isDivergentUse(&I.getOperandUse(ValIdx));
+ const bool ValDivergent = UA->isDivergentUse(I.getOperandUse(ValIdx));
// If the value operand is divergent, each lane is contributing a different
// value to the atomic calculation. We can only optimize divergent values if
@@ -231,7 +231,7 @@ void AMDGPUAtomicOptimizer::visitIntrinsicInst(IntrinsicInst &I) {
// If any of the other arguments to the intrinsic are divergent, we can't
// optimize the operation.
for (unsigned Idx = 1; Idx < I.getNumOperands(); Idx++) {
- if (DA->isDivergentUse(&I.getOperandUse(Idx))) {
+ if (UA->isDivergentUse(I.getOperandUse(Idx))) {
return;
}
}
@@ -705,7 +705,7 @@ void AMDGPUAtomicOptimizer::optimizeAtomic(Instruction &I,
INITIALIZE_PASS_BEGIN(AMDGPUAtomicOptimizer, DEBUG_TYPE,
"AMDGPU atomic optimizations", false, false)
-INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
+INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPUAtomicOptimizer, DEBUG_TYPE,
"AMDGPU atomic optimizations", false, false)
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-at-use.ll b/llvm/test/CodeGen/AMDGPU/divergence-at-use.ll
index 1771c0050217a..0c3470bebb0ff 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-at-use.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-at-use.ll
@@ -1,5 +1,4 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizations=true < %s | FileCheck %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizations=true < %s -use-gpu-divergence-analysis | FileCheck %s
@local = addrspace(3) global i32 undef
@@ -20,4 +19,26 @@ exit:
ret void
}
+define amdgpu_kernel void @def_in_nested_cycle() {
+; CHECK-LABEL: def_in_nested_cycle:
+; CHECK-NOT: dpp
+entry:
+ %x = call i32 @llvm.amdgcn.workitem.id.x()
+ br label %loop
+loop:
+ %i = phi i32 [ 0, %entry ], [ 0, %innerloop ], [ %i1, %loop ]
+ %cond = icmp ult i32 %i, %x
+ %i1 = add i32 %i, 1
+ br i1 %cond, label %innerloop, label %loop
+innerloop:
+ %i.inner = phi i32 [ 0, %loop ], [ %i1.inner, %innerloop ]
+ %gep = getelementptr i32, ptr addrspace(3) @local, i32 %i
+ %i1.inner = add i32 %i, 1
+ %cond.inner = icmp ult i32 %i, %x
+ br i1 %cond, label %innerloop, label %loop
+exit:
+ %old = atomicrmw add ptr addrspace(3) %gep, i32 %x acq_rel
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x()
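
[Editor's note] For intuition, here is a small self-contained model, not
LLVM code, of the parent-cycle walk that `isTemporalDivergent` performs in
the GenericUniformityImpl.h hunk above: starting from the innermost cycle
containing the def, walk outward until a cycle contains the observing
block; the use is temporally divergent if any cycle crossed on the way has
divergent exits. All types and values below are invented for illustration.

#include <iostream>
#include <set>

struct Cycle {
  const Cycle *Parent = nullptr;
  std::set<int> Blocks; // ids of blocks in this cycle, including subcycles
  bool contains(int B) const { return Blocks.count(B) != 0; }
};

// Mirrors the loop in isTemporalDivergent over a toy cycle hierarchy.
static bool isTemporalDivergent(const Cycle *DefCycle, int ObservingBlock,
                                const std::set<const Cycle *> &DivExitCycles) {
  for (const Cycle *C = DefCycle; C && !C->contains(ObservingBlock);
       C = C->Parent)
    if (DivExitCycles.count(C))
      return true; // def escapes a cycle with divergent exits
  return false;
}

int main() {
  Cycle Outer; Outer.Blocks = {1, 2, 3};
  Cycle Inner; Inner.Parent = &Outer; Inner.Blocks = {2, 3};
  std::set<const Cycle *> DivExit = {&Inner}; // Inner has divergent exits
  std::cout << isTemporalDivergent(&Inner, 4, DivExit) << '\n'; // 1: crosses Inner
  std::cout << isTemporalDivergent(&Inner, 2, DivExit) << '\n'; // 0: use stays inside
}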