[llvm] [InstrProf] Adding utility weights to BalancedPartitioning (PR #72717)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 17 14:58:26 PST 2023
https://github.com/spupyrev created https://github.com/llvm/llvm-project/pull/72717
From 2d1b06e75faf3a4e49896d5d34b4e317da8e9ec4 Mon Sep 17 00:00:00 2001
From: spupyrev <spupyrev at fb.com>
Date: Fri, 17 Nov 2023 14:51:58 -0800
Subject: [PATCH] [InstrProf] Adding utility weights to BalancedPartitioning
---
.../llvm/Support/BalancedPartitioning.h | 9 +++-
llvm/lib/ProfileData/InstrProf.cpp | 17 +++++---
llvm/lib/Support/BalancedPartitioning.cpp | 41 +++++++++++--------
3 files changed, 43 insertions(+), 24 deletions(-)
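As a rough orientation before the hunks below: the patch turns a utility node from a bare 32-bit id into an (id, weight) pair, and the weight later scales the cached move gains. What follows is a minimal, self-contained sketch of that idea with illustrative names (WeightedUtilityNode and dedupById are not part of the patch); the dedup-by-id step mirrors what the InstrProf.cpp hunk does with custom comparators once plain sort/unique no longer apply to the struct.

  #include <algorithm>
  #include <cstdint>
  #include <vector>

  // Stand-in for the new BPFunctionNode::UtilityNodeT: an id plus a weight.
  struct WeightedUtilityNode {
    uint32_t Id;
    uint32_t Weight = 1;
  };

  // Utility nodes attached to a function must stay unique per id, so both
  // sorting and deduplication compare ids only.
  void dedupById(std::vector<WeightedUtilityNode> &UNs) {
    std::sort(UNs.begin(), UNs.end(),
              [](const WeightedUtilityNode &L, const WeightedUtilityNode &R) {
                return L.Id < R.Id;
              });
    UNs.erase(std::unique(UNs.begin(), UNs.end(),
                          [](const WeightedUtilityNode &L,
                             const WeightedUtilityNode &R) {
                            return L.Id == R.Id;
                          }),
              UNs.end());
  }
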
diff --git a/llvm/include/llvm/Support/BalancedPartitioning.h b/llvm/include/llvm/Support/BalancedPartitioning.h
index a8464ac0fe60e58..123ba20a621c61f 100644
--- a/llvm/include/llvm/Support/BalancedPartitioning.h
+++ b/llvm/include/llvm/Support/BalancedPartitioning.h
@@ -57,8 +57,13 @@ class BPFunctionNode {
friend class BalancedPartitioning;
public:
+ /// The type of ID
using IDT = uint64_t;
- using UtilityNodeT = uint32_t;
+ /// The type of UtilityNode
+ struct UtilityNodeT {
+ uint32_t id;
+ uint32_t weight;
+ };
/// \param UtilityNodes the set of utility nodes (must be unique'd)
BPFunctionNode(IDT Id, ArrayRef<UtilityNodeT> UtilityNodes)
@@ -188,6 +193,8 @@ class BalancedPartitioning {
float CachedGainRL;
/// Whether \p CachedGainLR and \p CachedGainRL are valid
bool CachedGainIsValid = false;
+ /// The weight of this utility node
+ uint32_t Weight = 1;
};
protected:
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp
index 236b083a1e2155b..586f02b31568e56 100644
--- a/llvm/lib/ProfileData/InstrProf.cpp
+++ b/llvm/lib/ProfileData/InstrProf.cpp
@@ -916,15 +916,15 @@ std::vector<BPFunctionNode> TemporalProfTraceTy::createBPFunctionNodes(
int N = std::ceil(std::log2(LargestTraceSize));
- // TODO: We need to use the Trace.Weight field to give more weight to more
+ // TODO: We may use the Trace.Weight field to give more weight to more
// important utilities
DenseMap<IDT, SmallVector<UtilityNodeT, 4>> FuncGroups;
- for (size_t TraceIdx = 0; TraceIdx < Traces.size(); TraceIdx++) {
+ for (uint32_t TraceIdx = 0; TraceIdx < Traces.size(); TraceIdx++) {
auto &Trace = Traces[TraceIdx].FunctionNameRefs;
for (size_t Timestamp = 0; Timestamp < Trace.size(); Timestamp++) {
for (int I = std::floor(std::log2(Timestamp + 1)); I < N; I++) {
auto &FunctionId = Trace[Timestamp];
- UtilityNodeT GroupId = TraceIdx * N + I;
+ UtilityNodeT GroupId = {TraceIdx * N + I, 1};
FuncGroups[FunctionId].push_back(GroupId);
}
}
@@ -933,8 +933,15 @@ std::vector<BPFunctionNode> TemporalProfTraceTy::createBPFunctionNodes(
std::vector<BPFunctionNode> Nodes;
for (auto &Id : FunctionIds) {
auto &UNs = FuncGroups[Id];
- llvm::sort(UNs);
- UNs.erase(std::unique(UNs.begin(), UNs.end()), UNs.end());
+ llvm::sort(UNs.begin(), UNs.end(),
+ [](const UtilityNodeT &L, const UtilityNodeT &R) {
+ return L.id < R.id;
+ });
+ UNs.erase(std::unique(UNs.begin(), UNs.end(),
+ [](const UtilityNodeT &L, const UtilityNodeT &R) {
+ return L.id == R.id;
+ }),
+ UNs.end());
Nodes.emplace_back(Id, UNs);
}
return Nodes;
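For readers unfamiliar with createBPFunctionNodes, a tiny standalone walk-through of the grouping arithmetic in the hunk above may help; the values here are made up, and every group carries the constant weight 1, exactly as in the patch.

  #include <cmath>
  #include <cstdint>
  #include <cstdio>

  // With N = ceil(log2(LargestTraceSize)), the function at position Timestamp
  // of trace TraceIdx joins the utility groups TraceIdx * N + I for every
  // I in [floor(log2(Timestamp + 1)), N).
  int main() {
    const uint32_t LargestTraceSize = 8; // example trace length
    const int N = (int)std::ceil(std::log2((double)LargestTraceSize)); // N = 3
    const uint32_t TraceIdx = 0;
    for (uint32_t Timestamp = 0; Timestamp < LargestTraceSize; ++Timestamp)
      for (int I = (int)std::floor(std::log2(Timestamp + 1.0)); I < N; ++I)
        std::printf("timestamp %u -> group id %u (weight 1)\n",
                    (unsigned)Timestamp, (unsigned)(TraceIdx * N + I));
  }
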
diff --git a/llvm/lib/Support/BalancedPartitioning.cpp b/llvm/lib/Support/BalancedPartitioning.cpp
index 5843be949911514..1778741e2cc54c7 100644
--- a/llvm/lib/Support/BalancedPartitioning.cpp
+++ b/llvm/lib/Support/BalancedPartitioning.cpp
@@ -21,8 +21,10 @@ using namespace llvm;
#define DEBUG_TYPE "balanced-partitioning"
void BPFunctionNode::dump(raw_ostream &OS) const {
- OS << formatv("{{ID={0} Utilities={{{1:$[,]}} Bucket={2}}", Id,
- make_range(UtilityNodes.begin(), UtilityNodes.end()), Bucket);
+ // TODO
+ // OS << formatv("{{ID={0} Utilities={{{1:$[,]}} Bucket={2}}", Id,
+ // make_range(UtilityNodes.begin(), UtilityNodes.end()),
+ // Bucket);
}
template <typename Func>
@@ -167,36 +169,37 @@ void BalancedPartitioning::runIterations(const FunctionNodeRange Nodes,
unsigned RightBucket,
std::mt19937 &RNG) const {
unsigned NumNodes = std::distance(Nodes.begin(), Nodes.end());
- DenseMap<BPFunctionNode::UtilityNodeT, unsigned> UtilityNodeDegree;
+ DenseMap<uint32_t, unsigned> UtilityNodeDegree;
for (auto &N : Nodes)
for (auto &UN : N.UtilityNodes)
- ++UtilityNodeDegree[UN];
+ ++UtilityNodeDegree[UN.id];
// Remove utility nodes if they have just one edge or are connected to all
// functions
for (auto &N : Nodes)
llvm::erase_if(N.UtilityNodes, [&](auto &UN) {
- return UtilityNodeDegree[UN] <= 1 || UtilityNodeDegree[UN] >= NumNodes;
+ return UtilityNodeDegree[UN.id] <= 1 ||
+ UtilityNodeDegree[UN.id] >= NumNodes;
});
// Renumber utility nodes so they can be used to index into Signatures
- DenseMap<BPFunctionNode::UtilityNodeT, unsigned> UtilityNodeIndex;
+ DenseMap<uint32_t, unsigned> UtilityNodeIndex;
for (auto &N : Nodes)
for (auto &UN : N.UtilityNodes)
- if (!UtilityNodeIndex.count(UN))
- UtilityNodeIndex[UN] = UtilityNodeIndex.size();
+ if (!UtilityNodeIndex.count(UN.id))
+ UtilityNodeIndex[UN.id] = UtilityNodeIndex.size();
for (auto &N : Nodes)
for (auto &UN : N.UtilityNodes)
- UN = UtilityNodeIndex[UN];
+ UN.id = UtilityNodeIndex[UN.id];
// Initialize signatures
SignaturesT Signatures(/*Size=*/UtilityNodeIndex.size());
for (auto &N : Nodes) {
for (auto &UN : N.UtilityNodes) {
- assert(UN < Signatures.size());
+ assert(UN.id < Signatures.size());
if (N.Bucket == LeftBucket) {
- Signatures[UN].LeftCount++;
+ Signatures[UN.id].LeftCount++;
} else {
- Signatures[UN].RightCount++;
+ Signatures[UN.id].RightCount++;
}
}
}
@@ -225,9 +228,11 @@ unsigned BalancedPartitioning::runIteration(const FunctionNodeRange Nodes,
Signature.CachedGainLR = 0.f;
Signature.CachedGainRL = 0.f;
if (L > 0)
- Signature.CachedGainLR = Cost - logCost(L - 1, R + 1);
+ Signature.CachedGainLR =
+ (Cost - logCost(L - 1, R + 1)) * Signature.Weight;
if (R > 0)
- Signature.CachedGainRL = Cost - logCost(L + 1, R - 1);
+ Signature.CachedGainRL =
+ (Cost - logCost(L + 1, R - 1)) * Signature.Weight;
Signature.CachedGainIsValid = true;
}
@@ -286,14 +291,14 @@ bool BalancedPartitioning::moveFunctionNode(BPFunctionNode &N,
// Update signatures and invalidate gain cache
if (FromLeftToRight) {
for (auto &UN : N.UtilityNodes) {
- auto &Signature = Signatures[UN];
+ auto &Signature = Signatures[UN.id];
Signature.LeftCount--;
Signature.RightCount++;
Signature.CachedGainIsValid = false;
}
} else {
for (auto &UN : N.UtilityNodes) {
- auto &Signature = Signatures[UN];
+ auto &Signature = Signatures[UN.id];
Signature.LeftCount++;
Signature.RightCount--;
Signature.CachedGainIsValid = false;
@@ -322,8 +327,8 @@ float BalancedPartitioning::moveGain(const BPFunctionNode &N,
const SignaturesT &Signatures) {
float Gain = 0.f;
for (auto &UN : N.UtilityNodes)
- Gain += (FromLeftToRight ? Signatures[UN].CachedGainLR
- : Signatures[UN].CachedGainRL);
+ Gain += (FromLeftToRight ? Signatures[UN.id].CachedGainLR
+ : Signatures[UN.id].CachedGainRL);
return Gain;
}
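To make the effect of the new Weight field concrete, here is a compact sketch of the gain update and lookup outside of LLVM; placeholderCost is an arbitrary stand-in for BalancedPartitioning::logCost (whose real definition is untouched by this patch), and Signature/moveGain below are simplified copies of the structures in the hunks above.

  #include <cmath>
  #include <cstdint>
  #include <vector>

  // Placeholder change-in-cost function; the exact formula does not matter
  // for illustrating how the weight scales the cached gains.
  static float placeholderCost(unsigned L, unsigned R) {
    return -(L * std::log2(L + 1.0f) + R * std::log2(R + 1.0f));
  }

  struct Signature {
    unsigned LeftCount = 0;
    unsigned RightCount = 0;
    float CachedGainLR = 0.f;
    float CachedGainRL = 0.f;
    uint32_t Weight = 1; // new in this patch: scales both cached gains
  };

  // Mirrors the runIteration hunk: both cached gains are multiplied by the
  // signature's weight.
  void updateGains(Signature &Sig) {
    const unsigned L = Sig.LeftCount, R = Sig.RightCount;
    const float Cost = placeholderCost(L, R);
    Sig.CachedGainLR = 0.f;
    Sig.CachedGainRL = 0.f;
    if (L > 0)
      Sig.CachedGainLR = (Cost - placeholderCost(L - 1, R + 1)) * Sig.Weight;
    if (R > 0)
      Sig.CachedGainRL = (Cost - placeholderCost(L + 1, R - 1)) * Sig.Weight;
  }

  // Mirrors the moveGain hunk: gains are now looked up by the utility node's
  // id field rather than by the utility node value itself.
  float moveGain(const std::vector<uint32_t> &UtilityNodeIds,
                 bool FromLeftToRight, const std::vector<Signature> &Sigs) {
    float Gain = 0.f;
    for (uint32_t Id : UtilityNodeIds)
      Gain += FromLeftToRight ? Sigs[Id].CachedGainLR : Sigs[Id].CachedGainRL;
    return Gain;
  }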