[llvm] 5954b9d - [InstrProf] Adding utility weights to BalancedPartitioning (#72717)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 19 13:37:03 PST 2024


Author: spupyrev
Date: 2024-01-19T13:36:59-08:00
New Revision: 5954b9dca21bb0c69b9e991b2ddb84c8b05ecba3

URL: https://github.com/llvm/llvm-project/commit/5954b9dca21bb0c69b9e991b2ddb84c8b05ecba3
DIFF: https://github.com/llvm/llvm-project/commit/5954b9dca21bb0c69b9e991b2ddb84c8b05ecba3.diff

LOG: [InstrProf] Adding utility weights to BalancedPartitioning (#72717)

Add weights to utility nodes in BalancedPartitioning (BP) so that certain
utilities can be given more importance than others. This is useful when
several objectives are optimized jointly.
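
For illustration, here is a minimal sketch (not part of this commit) of how a
client might construct weighted utility nodes and run the partitioner; it
mirrors the new Weight1 unit test below and assumes the default
BalancedPartitioningConfig:

#include "llvm/Support/BalancedPartitioning.h"
#include <vector>

using llvm::BPFunctionNode;
using UN = BPFunctionNode::UtilityNodeT;

void runWeightedBP() {
  // Utility nodes 0-2 carry weight 100 and dominate the layout; utility
  // nodes 3-4 keep the default weight of 1.
  std::vector<UN> UNs = {UN(0, 100), UN(1, 100), UN(2, 100), UN(3, 1),
                         UN(4, 1)};
  std::vector<BPFunctionNode> Nodes = {
      BPFunctionNode(0, {UNs[0], UNs[3]}), BPFunctionNode(1, {UNs[1], UNs[3]}),
      BPFunctionNode(2, {UNs[2], UNs[3]}), BPFunctionNode(3, {UNs[0], UNs[4]}),
      BPFunctionNode(4, {UNs[1], UNs[4]}), BPFunctionNode(5, {UNs[2], UNs[4]}),
  };

  llvm::BalancedPartitioningConfig Config; // default algorithm parameters
  llvm::BalancedPartitioning BP(Config);
  // Assigns a Bucket to each node; the heavy utilities (0, 1, 2) drive the
  // grouping, so nodes {0,3}, {1,4}, and {2,5} end up adjacent.
  BP.run(Nodes);
}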

Added: 
    

Modified: 
    llvm/include/llvm/Support/BalancedPartitioning.h
    llvm/lib/ProfileData/InstrProf.cpp
    llvm/lib/Support/BalancedPartitioning.cpp
    llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
    llvm/unittests/Support/BalancedPartitioningTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Support/BalancedPartitioning.h b/llvm/include/llvm/Support/BalancedPartitioning.h
index a8464ac0fe60e58..19eb6d41e45e3a8 100644
--- a/llvm/include/llvm/Support/BalancedPartitioning.h
+++ b/llvm/include/llvm/Support/BalancedPartitioning.h
@@ -57,8 +57,27 @@ class BPFunctionNode {
   friend class BalancedPartitioning;
 
 public:
+  /// The type of ID
   using IDT = uint64_t;
-  using UtilityNodeT = uint32_t;
+  /// The type of UtilityNode
+  struct UtilityNodeT {
+    UtilityNodeT(uint32_t Id, uint32_t Weight = 1) : Id(Id), Weight(Weight) {}
+    uint32_t Id;
+    uint32_t Weight;
+
+    // Deduplicate utility nodes for a given function.
+    // TODO: One may experiment with accumulating the weights of duplicates.
+    static void sortAndDeduplicate(SmallVector<UtilityNodeT, 4> &UNs) {
+      llvm::sort(UNs, [](const UtilityNodeT &L, const UtilityNodeT &R) {
+        return L.Id < R.Id;
+      });
+      UNs.erase(std::unique(UNs.begin(), UNs.end(),
+                            [](const UtilityNodeT &L, const UtilityNodeT &R) {
+                              return L.Id == R.Id;
+                            }),
+                UNs.end());
+    }
+  };
 
   /// \param UtilityNodes the set of utility nodes (must be unique'd)
   BPFunctionNode(IDT Id, ArrayRef<UtilityNodeT> UtilityNodes)
@@ -78,8 +97,7 @@ class BPFunctionNode {
   uint64_t InputOrderIndex = 0;
 
   friend class BPFunctionNodeTest_Basic_Test;
-  friend class BalancedPartitioningTest_Basic_Test;
-  friend class BalancedPartitioningTest_Large_Test;
+  friend class BalancedPartitioningTest;
 };
 
 /// Algorithm parameters; default values are tuned on real-world binaries
@@ -188,6 +206,8 @@ class BalancedPartitioning {
     float CachedGainRL;
     /// Whether \p CachedGainLR and \p CachedGainRL are valid
     bool CachedGainIsValid = false;
+    /// The weight of this utility node
+    uint32_t Weight = 1;
   };
 
 protected:

diff --git a/llvm/lib/ProfileData/InstrProf.cpp b/llvm/lib/ProfileData/InstrProf.cpp
index 2640027455e0dad..ba4fb227381f6c9 100644
--- a/llvm/lib/ProfileData/InstrProf.cpp
+++ b/llvm/lib/ProfileData/InstrProf.cpp
@@ -923,15 +923,15 @@ std::vector<BPFunctionNode> TemporalProfTraceTy::createBPFunctionNodes(
 
   const int N = Log2_64(LargestTraceSize) + 1;
 
-  // TODO: We need to use the Trace.Weight field to give more weight to more
+  // TODO: We may use the Trace.Weight field to give more weight to more
   // important utilities
   DenseMap<IDT, SmallVector<UtilityNodeT, 4>> FuncGroups;
-  for (size_t TraceIdx = 0; TraceIdx < Traces.size(); TraceIdx++) {
+  for (uint32_t TraceIdx = 0; TraceIdx < Traces.size(); TraceIdx++) {
     auto &Trace = Traces[TraceIdx].FunctionNameRefs;
     for (size_t Timestamp = 0; Timestamp < Trace.size(); Timestamp++) {
       for (int I = Log2_64(Timestamp + 1); I < N; I++) {
         auto FunctionId = Trace[Timestamp];
-        UtilityNodeT GroupId = TraceIdx * N + I;
+        UtilityNodeT GroupId(TraceIdx * N + I);
         FuncGroups[FunctionId].push_back(GroupId);
       }
     }
@@ -940,8 +940,7 @@ std::vector<BPFunctionNode> TemporalProfTraceTy::createBPFunctionNodes(
   std::vector<BPFunctionNode> Nodes;
   for (auto Id : FunctionIds) {
     auto &UNs = FuncGroups[Id];
-    llvm::sort(UNs);
-    UNs.erase(std::unique(UNs.begin(), UNs.end()), UNs.end());
+    UtilityNodeT::sortAndDeduplicate(UNs);
     Nodes.emplace_back(Id, UNs);
   }
   return Nodes;

diff --git a/llvm/lib/Support/BalancedPartitioning.cpp b/llvm/lib/Support/BalancedPartitioning.cpp
index cb6ba61179941fd..cd71ddec4764756 100644
--- a/llvm/lib/Support/BalancedPartitioning.cpp
+++ b/llvm/lib/Support/BalancedPartitioning.cpp
@@ -21,8 +21,13 @@ using namespace llvm;
 #define DEBUG_TYPE "balanced-partitioning"
 
 void BPFunctionNode::dump(raw_ostream &OS) const {
-  OS << formatv("{{ID={0} Utilities={{{1:$[,]}} Bucket={2}}", Id,
-                make_range(UtilityNodes.begin(), UtilityNodes.end()), Bucket);
+  OS << "{ID=" << Id << " Utilities={";
+  for (auto &N : UtilityNodes)
+    OS << N.Id << " ,";
+  OS << "}";
+  if (Bucket.has_value())
+    OS << " Bucket=" << Bucket.value();
+  OS << "}";
 }
 
 template <typename Func>
@@ -166,34 +171,40 @@ void BalancedPartitioning::runIterations(const FunctionNodeRange Nodes,
                                          unsigned RecDepth, unsigned LeftBucket,
                                          unsigned RightBucket,
                                          std::mt19937 &RNG) const {
-  unsigned NumNodes = std::distance(Nodes.begin(), Nodes.end());
-  DenseMap<BPFunctionNode::UtilityNodeT, unsigned> UtilityNodeIndex;
+  // Count the degree of each utility node.
+  DenseMap<uint32_t, unsigned> UtilityNodeIndex;
   for (auto &N : Nodes)
     for (auto &UN : N.UtilityNodes)
-      ++UtilityNodeIndex[UN];
+      ++UtilityNodeIndex[UN.Id];
   // Remove utility nodes if they have just one edge or are connected to all
-  // functions
+  // functions.
+  unsigned NumNodes = std::distance(Nodes.begin(), Nodes.end());
   for (auto &N : Nodes)
     llvm::erase_if(N.UtilityNodes, [&](auto &UN) {
-      return UtilityNodeIndex[UN] == 1 || UtilityNodeIndex[UN] == NumNodes;
+      return UtilityNodeIndex[UN.Id] == 1 ||
+             UtilityNodeIndex[UN.Id] == NumNodes;
     });
 
-  // Renumber utility nodes so they can be used to index into Signatures
+  // Renumber utility nodes so they can be used to index into Signatures.
   UtilityNodeIndex.clear();
   for (auto &N : Nodes)
     for (auto &UN : N.UtilityNodes)
-      UN = UtilityNodeIndex.insert({UN, UtilityNodeIndex.size()}).first->second;
+      UN.Id = UtilityNodeIndex.insert({UN.Id, UtilityNodeIndex.size()})
+                  .first->second;
 
-  // Initialize signatures
+  // Initialize signatures.
   SignaturesT Signatures(/*Size=*/UtilityNodeIndex.size());
   for (auto &N : Nodes) {
     for (auto &UN : N.UtilityNodes) {
-      assert(UN < Signatures.size());
-      if (N.Bucket == LeftBucket) {
-        Signatures[UN].LeftCount++;
-      } else {
-        Signatures[UN].RightCount++;
-      }
+      assert(UN.Id < Signatures.size());
+      if (N.Bucket == LeftBucket)
+        Signatures[UN.Id].LeftCount++;
+      else
+        Signatures[UN.Id].RightCount++;
+      // Identical utility nodes (having the same UN.Id) have the same weight
+      // (unless there are hash collisions mapping utilities to the same Id);
+      // thus, we get a new weight only when the signature is uninitialized.
+      Signatures[UN.Id].Weight = UN.Weight;
     }
   }
 
@@ -221,9 +232,11 @@ unsigned BalancedPartitioning::runIteration(const FunctionNodeRange Nodes,
     Signature.CachedGainLR = 0.f;
     Signature.CachedGainRL = 0.f;
     if (L > 0)
-      Signature.CachedGainLR = Cost - logCost(L - 1, R + 1);
+      Signature.CachedGainLR =
+          (Cost - logCost(L - 1, R + 1)) * Signature.Weight;
     if (R > 0)
-      Signature.CachedGainRL = Cost - logCost(L + 1, R - 1);
+      Signature.CachedGainRL =
+          (Cost - logCost(L + 1, R - 1)) * Signature.Weight;
     Signature.CachedGainIsValid = true;
   }
 
@@ -282,14 +295,14 @@ bool BalancedPartitioning::moveFunctionNode(BPFunctionNode &N,
   // Update signatures and invalidate gain cache
   if (FromLeftToRight) {
     for (auto &UN : N.UtilityNodes) {
-      auto &Signature = Signatures[UN];
+      auto &Signature = Signatures[UN.Id];
       Signature.LeftCount--;
       Signature.RightCount++;
       Signature.CachedGainIsValid = false;
     }
   } else {
     for (auto &UN : N.UtilityNodes) {
-      auto &Signature = Signatures[UN];
+      auto &Signature = Signatures[UN.Id];
       Signature.LeftCount++;
       Signature.RightCount--;
       Signature.CachedGainIsValid = false;
@@ -318,8 +331,8 @@ float BalancedPartitioning::moveGain(const BPFunctionNode &N,
                                      const SignaturesT &Signatures) {
   float Gain = 0.f;
   for (auto &UN : N.UtilityNodes)
-    Gain += (FromLeftToRight ? Signatures[UN].CachedGainLR
-                             : Signatures[UN].CachedGainRL);
+    Gain += (FromLeftToRight ? Signatures[UN.Id].CachedGainLR
+                             : Signatures[UN.Id].CachedGainRL);
   return Gain;
 }
 

diff --git a/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp b/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
index 6af6f1bcdc40a80..787e3041fb5e7b5 100644
--- a/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
+++ b/llvm/unittests/ProfileData/BPFunctionNodeTest.cpp
@@ -13,8 +13,8 @@
 #include "gtest/gtest.h"
 
 using testing::Field;
+using testing::Matcher;
 using testing::UnorderedElementsAre;
-using testing::UnorderedElementsAreArray;
 
 namespace llvm {
 
@@ -24,29 +24,35 @@ void PrintTo(const BPFunctionNode &Node, std::ostream *OS) {
 }
 
 TEST(BPFunctionNodeTest, Basic) {
+  auto UNIdsAre = [](auto... Ids) {
+    return UnorderedElementsAre(Field("Id", &BPFunctionNode::UtilityNodeT::Id,
+                                      std::forward<uint32_t>(Ids))...);
+  };
   auto NodeIs = [](BPFunctionNode::IDT Id,
-                   ArrayRef<BPFunctionNode::UtilityNodeT> UNs) {
-    return AllOf(Field("Id", &BPFunctionNode::Id, Id),
-                 Field("UtilityNodes", &BPFunctionNode::UtilityNodes,
-                       UnorderedElementsAreArray(UNs)));
+                   Matcher<ArrayRef<BPFunctionNode::UtilityNodeT>> UNsMatcher) {
+    return AllOf(
+        Field("Id", &BPFunctionNode::Id, Id),
+        Field("UtilityNodes", &BPFunctionNode::UtilityNodes, UNsMatcher));
   };
 
   auto Nodes = TemporalProfTraceTy::createBPFunctionNodes({
       TemporalProfTraceTy({0, 1, 2, 3}),
   });
-  EXPECT_THAT(Nodes,
-              UnorderedElementsAre(NodeIs(0, {0, 1, 2}), NodeIs(1, {1, 2}),
-                                   NodeIs(2, {1, 2}), NodeIs(3, {2})));
+  EXPECT_THAT(Nodes, UnorderedElementsAre(NodeIs(0, UNIdsAre(0, 1, 2)),
+                                          NodeIs(1, UNIdsAre(1, 2)),
+                                          NodeIs(2, UNIdsAre(1, 2)),
+                                          NodeIs(3, UNIdsAre(2))));
 
   Nodes = TemporalProfTraceTy::createBPFunctionNodes({
       TemporalProfTraceTy({0, 1, 2, 3, 4}),
       TemporalProfTraceTy({4, 2}),
   });
 
-  EXPECT_THAT(Nodes,
-              UnorderedElementsAre(NodeIs(0, {0, 1, 2}), NodeIs(1, {1, 2}),
-                                   NodeIs(2, {1, 2, 4, 5}), NodeIs(3, {2}),
-                                   NodeIs(4, {2, 3, 4, 5})));
+  EXPECT_THAT(Nodes, UnorderedElementsAre(NodeIs(0, UNIdsAre(0, 1, 2)),
+                                          NodeIs(1, UNIdsAre(1, 2)),
+                                          NodeIs(2, UNIdsAre(1, 2, 4, 5)),
+                                          NodeIs(3, UNIdsAre(2)),
+                                          NodeIs(4, UNIdsAre(2, 3, 4, 5))));
 }
 
 } // end namespace llvm

diff --git a/llvm/unittests/Support/BalancedPartitioningTest.cpp b/llvm/unittests/Support/BalancedPartitioningTest.cpp
index ebe518a8e89cacf..79e89ae20148073 100644
--- a/llvm/unittests/Support/BalancedPartitioningTest.cpp
+++ b/llvm/unittests/Support/BalancedPartitioningTest.cpp
@@ -37,6 +37,20 @@ class BalancedPartitioningTest : public ::testing::Test {
       Ids.push_back(N.Id);
     return Ids;
   }
+
+  static testing::Matcher<BPFunctionNode> NodeIdIs(BPFunctionNode::IDT Id) {
+    return Field("Id", &BPFunctionNode::Id, Id);
+  };
+
+  static testing::Matcher<BPFunctionNode>
+  NodeBucketIs(std::optional<uint32_t> Bucket) {
+    return Field("Bucket", &BPFunctionNode::Bucket, Bucket);
+  };
+
+  static testing::Matcher<BPFunctionNode>
+  NodeIs(BPFunctionNode::IDT Id, std::optional<uint32_t> Bucket) {
+    return AllOf(NodeIdIs(Id), NodeBucketIs(Bucket));
+  };
 };
 
 TEST_F(BalancedPartitioningTest, Basic) {
@@ -48,11 +62,6 @@ TEST_F(BalancedPartitioningTest, Basic) {
 
   Bp.run(Nodes);
 
-  auto NodeIs = [](BPFunctionNode::IDT Id, std::optional<uint32_t> Bucket) {
-    return AllOf(Field("Id", &BPFunctionNode::Id, Id),
-                 Field("Bucket", &BPFunctionNode::Bucket, Bucket));
-  };
-
   EXPECT_THAT(Nodes,
               UnorderedElementsAre(NodeIs(0, 0), NodeIs(1, 1), NodeIs(2, 2),
                                    NodeIs(3, 3), NodeIs(4, 4)));
@@ -79,8 +88,7 @@ TEST_F(BalancedPartitioningTest, Large) {
 
   Bp.run(Nodes);
 
-  EXPECT_THAT(
-      Nodes, Each(Not(Field("Bucket", &BPFunctionNode::Bucket, std::nullopt))));
+  EXPECT_THAT(Nodes, Each(Not(NodeBucketIs(std::nullopt))));
   EXPECT_THAT(getIds(Nodes), UnorderedElementsAreArray(OrigIds));
 }
 
@@ -97,4 +105,55 @@ TEST_F(BalancedPartitioningTest, MoveGain) {
                   30.f);
 }
 
+TEST_F(BalancedPartitioningTest, Weight1) {
+  std::vector<BPFunctionNode::UtilityNodeT> UNs = {
+      BPFunctionNode::UtilityNodeT(0, 100),
+      BPFunctionNode::UtilityNodeT(1, 100),
+      BPFunctionNode::UtilityNodeT(2, 100),
+      BPFunctionNode::UtilityNodeT(3, 1),
+      BPFunctionNode::UtilityNodeT(4, 1),
+  };
+  std::vector<BPFunctionNode> Nodes = {
+      BPFunctionNode(0, {UNs[0], UNs[3]}), BPFunctionNode(1, {UNs[1], UNs[3]}),
+      BPFunctionNode(2, {UNs[2], UNs[3]}), BPFunctionNode(3, {UNs[0], UNs[4]}),
+      BPFunctionNode(4, {UNs[1], UNs[4]}), BPFunctionNode(5, {UNs[2], UNs[4]}),
+  };
+
+  Bp.run(Nodes);
+
+  // Check that nodes that share important UNs are ordered together
+  auto NodesRef = ArrayRef(Nodes);
+  auto Groups = {NodesRef.slice(0, 2), NodesRef.slice(2, 2),
+                 NodesRef.slice(4, 2)};
+  EXPECT_THAT(Groups, UnorderedElementsAre(
+                          UnorderedElementsAre(NodeIdIs(0), NodeIdIs(3)),
+                          UnorderedElementsAre(NodeIdIs(1), NodeIdIs(4)),
+                          UnorderedElementsAre(NodeIdIs(2), NodeIdIs(5))));
+}
+
+TEST_F(BalancedPartitioningTest, Weight2) {
+  std::vector<BPFunctionNode::UtilityNodeT> UNs = {
+      BPFunctionNode::UtilityNodeT(0, 1),
+      BPFunctionNode::UtilityNodeT(1, 1),
+      BPFunctionNode::UtilityNodeT(2, 1),
+      BPFunctionNode::UtilityNodeT(3, 100),
+      BPFunctionNode::UtilityNodeT(4, 100),
+  };
+  std::vector<BPFunctionNode> Nodes = {
+      BPFunctionNode(0, {UNs[0], UNs[3]}), BPFunctionNode(1, {UNs[1], UNs[4]}),
+      BPFunctionNode(2, {UNs[2], UNs[3]}), BPFunctionNode(3, {UNs[0], UNs[4]}),
+      BPFunctionNode(4, {UNs[1], UNs[3]}), BPFunctionNode(5, {UNs[2], UNs[4]}),
+  };
+
+  Bp.run(Nodes);
+
+  // Check that nodes that share important UNs are ordered together
+  auto NodesRef = ArrayRef(Nodes);
+  auto Groups = {NodesRef.slice(0, 3), NodesRef.slice(3, 3)};
+  EXPECT_THAT(Groups,
+              UnorderedElementsAre(
+                  UnorderedElementsAre(NodeIdIs(0), NodeIdIs(2), NodeIdIs(4)),
+                  UnorderedElementsAre(NodeIdIs(1), NodeIdIs(3), NodeIdIs(5))));
+}
+
 } // end namespace llvm


        

