[llvm] 30aa9fb - Revert "[InstrProf] Adding utility weights to BalancedPartitioning (#72717)"
spupyrev via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 19 15:14:05 PST 2024
Author: spupyrev
Date: 2024-01-19T15:13:47-08:00
New Revision: 30aa9fb4c1da33892a38f952fbdf6e7e45e5953a
URL: https://github.com/llvm/llvm-project/commit/30aa9fb4c1da33892a38f952fbdf6e7e45e5953a
DIFF: https://github.com/llvm/llvm-project/commit/30aa9fb4c1da33892a38f952fbdf6e7e45e5953a.diff
LOG: Revert "[InstrProf] Adding utility weights to BalancedPartitioning (#72717)"
This reverts commit 5954b9dca21bb0c69b9e991b2ddb84c8b05ecba3
due to a broken Windows build.
Added:
Modified:
llvm/include/llvm/Support/BalancedPartitioning.h
llvm/lib/ProfileData/InstrProf.cpp
llvm/lib/Support/BalancedPartitioning.cpp
llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
llvm/unittests/Support/BalancedPartitioningTest.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Support/BalancedPartitioning.h b/llvm/include/llvm/Support/BalancedPartitioning.h
index 19eb6d41e45e3a..a8464ac0fe60e5 100644
--- a/llvm/include/llvm/Support/BalancedPartitioning.h
+++ b/llvm/include/llvm/Support/BalancedPartitioning.h
@@ -57,27 +57,8 @@ class BPFunctionNode {
friend class BalancedPartitioning;
public:
- /// The type of ID
using IDT = uint64_t;
- /// The type of UtilityNode
- struct UtilityNodeT {
- UtilityNodeT(uint32_t Id, uint32_t Weight = 1) : Id(Id), Weight(Weight) {}
- uint32_t Id;
- uint32_t Weight;
-
- // Deduplicate utility nodes for a given function.
- // TODO: One may experiment with accumulating the weights of duplicates.
- static void sortAndDeduplicate(SmallVector<UtilityNodeT, 4> &UNs) {
- llvm::sort(UNs, [](const UtilityNodeT &L, const UtilityNodeT &R) {
- return L.Id < R.Id;
- });
- UNs.erase(std::unique(UNs.begin(), UNs.end(),
- [](const UtilityNodeT &L, const UtilityNodeT &R) {
- return L.Id == R.Id;
- }),
- UNs.end());
- }
- };
+ using UtilityNodeT = uint32_t;
/// \param UtilityNodes the set of utility nodes (must be unique'd)
BPFunctionNode(IDT Id, ArrayRef<UtilityNodeT> UtilityNodes)
@@ -97,7 +78,8 @@ class BPFunctionNode {
uint64_t InputOrderIndex = 0;
friend class BPFunctionNodeTest_Basic_Test;
- friend class BalancedPartitioningTest;
+ friend class BalancedPartitioningTest_Basic_Test;
+ friend class BalancedPartitioningTest_Large_Test;
};
/// Algorithm parameters; default values are tuned on real-world binaries
@@ -206,8 +188,6 @@ class BalancedPartitioning {
float CachedGainRL;
/// Whether \p CachedGainLR and \p CachedGainRL are valid
bool CachedGainIsValid = false;
- /// The weight of this utility node
- uint32_t Weight = 1;
};
protected:
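
Note on the header hunk above: the revert replaces the removed UtilityNodeT struct (which carried an Id, a Weight, and a sortAndDeduplicate helper) with a plain integer alias. A minimal standalone sketch of the post-revert shape, using std containers instead of LLVM's SmallVector purely for illustration:

#include <algorithm>
#include <cstdint>
#include <vector>

using UtilityNodeT = uint32_t; // post-revert: a bare id, no per-node weight

// The dedup step that the removed struct's sortAndDeduplicate provided now
// happens at the call site (see the InstrProf.cpp hunk below).
static void sortAndDeduplicate(std::vector<UtilityNodeT> &UNs) {
  std::sort(UNs.begin(), UNs.end());
  UNs.erase(std::unique(UNs.begin(), UNs.end()), UNs.end());
}

int main() {
  std::vector<UtilityNodeT> UNs = {2, 1, 2, 0, 1};
  sortAndDeduplicate(UNs); // UNs is now {0, 1, 2}
  return UNs.size() == 3 ? 0 : 1;
}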
diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp
index ba4fb227381f6c..2640027455e0da 100644
--- a/llvm/lib/ProfileData/InstrProf.cpp
+++ b/llvm/lib/ProfileData/InstrProf.cpp
@@ -923,15 +923,15 @@ std::vector<BPFunctionNode> TemporalProfTraceTy::createBPFunctionNodes(
const int N = Log2_64(LargestTraceSize) + 1;
- // TODO: We may use the Trace.Weight field to give more weight to more
+ // TODO: We need to use the Trace.Weight field to give more weight to more
// important utilities
DenseMap<IDT, SmallVector<UtilityNodeT, 4>> FuncGroups;
- for (uint32_t TraceIdx = 0; TraceIdx < Traces.size(); TraceIdx++) {
+ for (size_t TraceIdx = 0; TraceIdx < Traces.size(); TraceIdx++) {
auto &Trace = Traces[TraceIdx].FunctionNameRefs;
for (size_t Timestamp = 0; Timestamp < Trace.size(); Timestamp++) {
for (int I = Log2_64(Timestamp + 1); I < N; I++) {
auto FunctionId = Trace[Timestamp];
- UtilityNodeT GroupId(TraceIdx * N + I);
+ UtilityNodeT GroupId = TraceIdx * N + I;
FuncGroups[FunctionId].push_back(GroupId);
}
}
@@ -940,7 +940,8 @@ std::vector<BPFunctionNode> TemporalProfTraceTy::createBPFunctionNodes(
std::vector<BPFunctionNode> Nodes;
for (auto Id : FunctionIds) {
auto &UNs = FuncGroups[Id];
- UtilityNodeT::sortAndDeduplicate(UNs);
+ llvm::sort(UNs);
+ UNs.erase(std::unique(UNs.begin(), UNs.end()), UNs.end());
Nodes.emplace_back(Id, UNs);
}
return Nodes;
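
For context on the group-id scheme in the hunk above: the function seen at Timestamp in trace TraceIdx joins utility-node groups TraceIdx * N + I for I in [Log2_64(Timestamp + 1), N). A self-contained sketch (an assumed simplification, not LLVM code) that reproduces those ids for the single trace {0, 1, 2, 3} exercised in BPFunctionNodeTest below:

#include <cstdint>
#include <cstdio>

// Stand-in for llvm::Log2_64 (floor of log2).
static int log2u64(uint64_t X) { int L = 0; while (X >>= 1) ++L; return L; }

int main() {
  const uint64_t LargestTraceSize = 4;
  const int N = log2u64(LargestTraceSize) + 1; // 3 group levels per trace
  const uint32_t TraceIdx = 0;
  for (uint64_t Timestamp = 0; Timestamp < LargestTraceSize; Timestamp++)
    for (int I = log2u64(Timestamp + 1); I < N; I++)
      std::printf("function %llu -> group %u\n",
                  (unsigned long long)Timestamp, TraceIdx * N + I);
  // Function 0 joins groups {0, 1, 2}; 1 and 2 join {1, 2}; 3 joins {2},
  // matching the expectations in BPFunctionNodeTest.cpp.
  return 0;
}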
diff --git a/llvm/lib/Support/BalancedPartitioning.cpp b/llvm/lib/Support/BalancedPartitioning.cpp
index cd71ddec476475..cb6ba61179941f 100644
--- a/llvm/lib/Support/BalancedPartitioning.cpp
+++ b/llvm/lib/Support/BalancedPartitioning.cpp
@@ -21,13 +21,8 @@ using namespace llvm;
#define DEBUG_TYPE "balanced-partitioning"
void BPFunctionNode::dump(raw_ostream &OS) const {
- OS << "{ID=" << Id << " Utilities={";
- for (auto &N : UtilityNodes)
- OS << N.Id << " ,";
- OS << "}";
- if (Bucket.has_value())
- OS << " Bucket=" << Bucket.value();
- OS << "}";
+ OS << formatv("{{ID={0} Utilities={{{1:$[,]}} Bucket={2}}", Id,
+ make_range(UtilityNodes.begin(), UtilityNodes.end()), Bucket);
}
template <typename Func>
@@ -171,40 +166,34 @@ void BalancedPartitioning::runIterations(const FunctionNodeRange Nodes,
unsigned RecDepth, unsigned LeftBucket,
unsigned RightBucket,
std::mt19937 &RNG) const {
- // Count the degree of each utility node.
- DenseMap<uint32_t, unsigned> UtilityNodeIndex;
+ unsigned NumNodes = std::distance(Nodes.begin(), Nodes.end());
+ DenseMap<BPFunctionNode::UtilityNodeT, unsigned> UtilityNodeIndex;
for (auto &N : Nodes)
for (auto &UN : N.UtilityNodes)
- ++UtilityNodeIndex[UN.Id];
+ ++UtilityNodeIndex[UN];
// Remove utility nodes if they have just one edge or are connected to all
- // functions.
- unsigned NumNodes = std::distance(Nodes.begin(), Nodes.end());
+ // functions
for (auto &N : Nodes)
llvm::erase_if(N.UtilityNodes, [&](auto &UN) {
- return UtilityNodeIndex[UN.Id] == 1 ||
- UtilityNodeIndex[UN.Id] == NumNodes;
+ return UtilityNodeIndex[UN] == 1 || UtilityNodeIndex[UN] == NumNodes;
});
- // Renumber utility nodes so they can be used to index into Signatures.
+ // Renumber utility nodes so they can be used to index into Signatures
UtilityNodeIndex.clear();
for (auto &N : Nodes)
for (auto &UN : N.UtilityNodes)
- UN.Id = UtilityNodeIndex.insert({UN.Id, UtilityNodeIndex.size()})
- .first->second;
+ UN = UtilityNodeIndex.insert({UN, UtilityNodeIndex.size()}).first->second;
- // Initialize signatures.
+ // Initialize signatures
SignaturesT Signatures(/*Size=*/UtilityNodeIndex.size());
for (auto &N : Nodes) {
for (auto &UN : N.UtilityNodes) {
- assert(UN.Id < Signatures.size());
- if (N.Bucket == LeftBucket)
- Signatures[UN.Id].LeftCount++;
- else
- Signatures[UN.Id].RightCount++;
- // Identical utility nodes (having the same UN.Id) have the same weight
- // (unless there are hash collisions mapping utilities to the same Id);
- // thus, we get a new weight only when the signature is uninitialized.
- Signatures[UN.Id].Weight = UN.Weight;
+ assert(UN < Signatures.size());
+ if (N.Bucket == LeftBucket) {
+ Signatures[UN].LeftCount++;
+ } else {
+ Signatures[UN].RightCount++;
+ }
}
}
@@ -232,11 +221,9 @@ unsigned BalancedPartitioning::runIteration(const FunctionNodeRange Nodes,
Signature.CachedGainLR = 0.f;
Signature.CachedGainRL = 0.f;
if (L > 0)
- Signature.CachedGainLR =
- (Cost - logCost(L - 1, R + 1)) * Signature.Weight;
+ Signature.CachedGainLR = Cost - logCost(L - 1, R + 1);
if (R > 0)
- Signature.CachedGainRL =
- (Cost - logCost(L + 1, R - 1)) * Signature.Weight;
+ Signature.CachedGainRL = Cost - logCost(L + 1, R - 1);
Signature.CachedGainIsValid = true;
}
@@ -295,14 +282,14 @@ bool BalancedPartitioning::moveFunctionNode(BPFunctionNode &N,
// Update signatures and invalidate gain cache
if (FromLeftToRight) {
for (auto &UN : N.UtilityNodes) {
- auto &Signature = Signatures[UN.Id];
+ auto &Signature = Signatures[UN];
Signature.LeftCount--;
Signature.RightCount++;
Signature.CachedGainIsValid = false;
}
} else {
for (auto &UN : N.UtilityNodes) {
- auto &Signature = Signatures[UN.Id];
+ auto &Signature = Signatures[UN];
Signature.LeftCount++;
Signature.RightCount--;
Signature.CachedGainIsValid = false;
@@ -331,8 +318,8 @@ float BalancedPartitioning::moveGain(const BPFunctionNode &N,
const SignaturesT &Signatures) {
float Gain = 0.f;
for (auto &UN : N.UtilityNodes)
- Gain += (FromLeftToRight ? Signatures[UN.Id].CachedGainLR
- : Signatures[UN.Id].CachedGainRL);
+ Gain += (FromLeftToRight ? Signatures[UN].CachedGainLR
+ : Signatures[UN].CachedGainRL);
return Gain;
}
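
The cost-model change above amounts to: cached gains are plain log-cost deltas again, and moveGain sums them per utility node with no weight factor. A minimal sketch (simplified types, not the LLVM implementation) of that accumulation:

#include <cstdint>
#include <vector>

struct SignatureSketch {
  float CachedGainLR = 0.f; // gain if a left-bucket node moves right
  float CachedGainRL = 0.f; // gain if a right-bucket node moves left
};

static float moveGainSketch(const std::vector<uint32_t> &UtilityNodes,
                            bool FromLeftToRight,
                            const std::vector<SignatureSketch> &Signatures) {
  float Gain = 0.f;
  for (uint32_t UN : UtilityNodes)
    Gain += FromLeftToRight ? Signatures[UN].CachedGainLR
                            : Signatures[UN].CachedGainRL;
  return Gain;
}

int main() {
  std::vector<SignatureSketch> Sigs(3);
  Sigs[0].CachedGainLR = 1.5f;
  Sigs[2].CachedGainLR = 2.5f;
  // A node touching utilities {0, 2} moving left-to-right gains 4.0.
  return moveGainSketch({0, 2}, /*FromLeftToRight=*/true, Sigs) == 4.f ? 0 : 1;
}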
diff --git a/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp b/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
index 787e3041fb5e7b..6af6f1bcdc40a8 100644
--- a/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
+++ b/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
@@ -13,8 +13,8 @@
#include "gtest/gtest.h"
using testing::Field;
-using testing::Matcher;
using testing::UnorderedElementsAre;
+using testing::UnorderedElementsAreArray;
namespace llvm {
@@ -24,35 +24,29 @@ void PrintTo(const BPFunctionNode &Node, std::ostream *OS) {
}
TEST(BPFunctionNodeTest, Basic) {
- auto UNIdsAre = [](auto... Ids) {
- return UnorderedElementsAre(Field("Id", &BPFunctionNode::UtilityNodeT::Id,
- std::forward<uint32_t>(Ids))...);
- };
auto NodeIs = [](BPFunctionNode::IDT Id,
- Matcher<ArrayRef<BPFunctionNode::UtilityNodeT>> UNsMatcher) {
- return AllOf(
- Field("Id", &BPFunctionNode::Id, Id),
- Field("UtilityNodes", &BPFunctionNode::UtilityNodes, UNsMatcher));
+ ArrayRef<BPFunctionNode::UtilityNodeT> UNs) {
+ return AllOf(Field("Id", &BPFunctionNode::Id, Id),
+ Field("UtilityNodes", &BPFunctionNode::UtilityNodes,
+ UnorderedElementsAreArray(UNs)));
};
auto Nodes = TemporalProfTraceTy::createBPFunctionNodes({
TemporalProfTraceTy({0, 1, 2, 3}),
});
- EXPECT_THAT(Nodes, UnorderedElementsAre(NodeIs(0, UNIdsAre(0, 1, 2)),
- NodeIs(1, UNIdsAre(1, 2)),
- NodeIs(2, UNIdsAre(1, 2)),
- NodeIs(3, UNIdsAre(2))));
+ EXPECT_THAT(Nodes,
+ UnorderedElementsAre(NodeIs(0, {0, 1, 2}), NodeIs(1, {1, 2}),
+ NodeIs(2, {1, 2}), NodeIs(3, {2})));
Nodes = TemporalProfTraceTy::createBPFunctionNodes({
TemporalProfTraceTy({0, 1, 2, 3, 4}),
TemporalProfTraceTy({4, 2}),
});
- EXPECT_THAT(Nodes, UnorderedElementsAre(NodeIs(0, UNIdsAre(0, 1, 2)),
- NodeIs(1, UNIdsAre(1, 2)),
- NodeIs(2, UNIdsAre(1, 2, 4, 5)),
- NodeIs(3, UNIdsAre(2)),
- NodeIs(4, UNIdsAre(2, 3, 4, 5))));
+ EXPECT_THAT(Nodes,
+ UnorderedElementsAre(NodeIs(0, {0, 1, 2}), NodeIs(1, {1, 2}),
+ NodeIs(2, {1, 2, 4, 5}), NodeIs(3, {2}),
+ NodeIs(4, {2, 3, 4, 5})));
}
} // end namespace llvm
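
With UtilityNodeT back to a plain uint32_t, the test above can match utility nodes by value rather than matching the Id field of a struct. A small gtest/gmock sketch (an assumed minimal example linked against gtest_main, not the LLVM test itself):

#include <cstdint>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

TEST(UtilityNodeMatcherSketch, PlainIntsMatchByValue) {
  std::vector<uint32_t> UNs = {2, 0, 1};
  // No Field("Id", ...) indirection is needed once utility nodes are integers.
  EXPECT_THAT(UNs, testing::UnorderedElementsAreArray({0u, 1u, 2u}));
}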
diff --git a/llvm/unittests/Support/BalancedPartitioningTest.cpp b/llvm/unittests/Support/BalancedPartitioningTest.cpp
index 79e89ae2014807..ebe518a8e89cac 100644
--- a/llvm/unittests/Support/BalancedPartitioningTest.cpp
+++ b/llvm/unittests/Support/BalancedPartitioningTest.cpp
@@ -37,20 +37,6 @@ class BalancedPartitioningTest : public ::testing::Test {
Ids.push_back(N.Id);
return Ids;
}
-
- static testing::Matcher<BPFunctionNode> NodeIdIs(BPFunctionNode::IDT Id) {
- return Field("Id", &BPFunctionNode::Id, Id);
- };
-
- static testing::Matcher<BPFunctionNode>
- NodeBucketIs(std::optional<uint32_t> Bucket) {
- return Field("Bucket", &BPFunctionNode::Bucket, Bucket);
- };
-
- static testing::Matcher<BPFunctionNode>
- NodeIs(BPFunctionNode::IDT Id, std::optional<uint32_t> Bucket) {
- return AllOf(NodeIdIs(Id), NodeBucketIs(Bucket));
- };
};
TEST_F(BalancedPartitioningTest, Basic) {
@@ -62,6 +48,11 @@ TEST_F(BalancedPartitioningTest, Basic) {
Bp.run(Nodes);
+ auto NodeIs = [](BPFunctionNode::IDT Id, std::optional<uint32_t> Bucket) {
+ return AllOf(Field("Id", &BPFunctionNode::Id, Id),
+ Field("Bucket", &BPFunctionNode::Bucket, Bucket));
+ };
+
EXPECT_THAT(Nodes,
UnorderedElementsAre(NodeIs(0, 0), NodeIs(1, 1), NodeIs(2, 2),
NodeIs(3, 3), NodeIs(4, 4)));
@@ -88,7 +79,8 @@ TEST_F(BalancedPartitioningTest, Large) {
Bp.run(Nodes);
- EXPECT_THAT(Nodes, Each(Not(NodeBucketIs(std::nullopt))));
+ EXPECT_THAT(
+ Nodes, Each(Not(Field("Bucket", &BPFunctionNode::Bucket, std::nullopt))));
EXPECT_THAT(getIds(Nodes), UnorderedElementsAreArray(OrigIds));
}
@@ -105,55 +97,4 @@ TEST_F(BalancedPartitioningTest, MoveGain) {
30.f);
}
-TEST_F(BalancedPartitioningTest, Weight1) {
- std::vector<BPFunctionNode::UtilityNodeT> UNs = {
- BPFunctionNode::UtilityNodeT(0, 100),
- BPFunctionNode::UtilityNodeT(1, 100),
- BPFunctionNode::UtilityNodeT(2, 100),
- BPFunctionNode::UtilityNodeT(3, 1),
- BPFunctionNode::UtilityNodeT(4, 1),
- };
- std::vector<BPFunctionNode> Nodes = {
- BPFunctionNode(0, {UNs[0], UNs[3]}), BPFunctionNode(1, {UNs[1], UNs[3]}),
- BPFunctionNode(2, {UNs[2], UNs[3]}), BPFunctionNode(3, {UNs[0], UNs[4]}),
- BPFunctionNode(4, {UNs[1], UNs[4]}), BPFunctionNode(5, {UNs[2], UNs[4]}),
- };
-
- Bp.run(Nodes);
-
- // Check that nodes that share important UNs are ordered together
- auto NodesRef = ArrayRef(Nodes);
- auto Groups = {NodesRef.slice(0, 2), NodesRef.slice(2, 2),
- NodesRef.slice(4, 2)};
- EXPECT_THAT(Groups, UnorderedElementsAre(
- UnorderedElementsAre(NodeIdIs(0), NodeIdIs(3)),
- UnorderedElementsAre(NodeIdIs(1), NodeIdIs(4)),
- UnorderedElementsAre(NodeIdIs(2), NodeIdIs(5))));
-}
-
-TEST_F(BalancedPartitioningTest, Weight2) {
- std::vector<BPFunctionNode::UtilityNodeT> UNs = {
- BPFunctionNode::UtilityNodeT(0, 1),
- BPFunctionNode::UtilityNodeT(1, 1),
- BPFunctionNode::UtilityNodeT(2, 1),
- BPFunctionNode::UtilityNodeT(3, 100),
- BPFunctionNode::UtilityNodeT(4, 100),
- };
- std::vector<BPFunctionNode> Nodes = {
- BPFunctionNode(0, {UNs[0], UNs[3]}), BPFunctionNode(1, {UNs[1], UNs[4]}),
- BPFunctionNode(2, {UNs[2], UNs[3]}), BPFunctionNode(3, {UNs[0], UNs[4]}),
- BPFunctionNode(4, {UNs[1], UNs[3]}), BPFunctionNode(5, {UNs[2], UNs[4]}),
- };
-
- Bp.run(Nodes);
-
- // Check that nodes that share important UNs are ordered together
- auto NodesRef = ArrayRef(Nodes);
- auto Groups = {NodesRef.slice(0, 3), NodesRef.slice(3, 3)};
- EXPECT_THAT(Groups,
- UnorderedElementsAre(
- UnorderedElementsAre(NodeIdIs(0), NodeIdIs(2), NodeIdIs(4)),
- UnorderedElementsAre(NodeIdIs(1), NodeIdIs(3), NodeIdIs(5))));
-}
-
} // end namespace llvm