[llvm] [MemProf] Convert CallContextInfo to a struct (NFC) (PR #108086)

Teresa Johnson via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 10 15:31:33 PDT 2024


https://github.com/teresajohnson updated https://github.com/llvm/llvm-project/pull/108086

From 90bf8a6059e753633076ea58a209350b8e243dae Mon Sep 17 00:00:00 2001
From: Teresa Johnson <tejohnson at google.com>
Date: Tue, 10 Sep 2024 13:59:22 -0700
Subject: [PATCH 1/2] [MemProf] Convert CallContextInfo to a struct (NFC)

As suggested in #107918, improve readability by converting this tuple to
a struct.
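
For illustration only (not part of the patch below, and using simplified
stand-in types rather than the actual CallTy/FuncTy template parameters or
LLVM's DenseSet), here is a minimal standalone sketch of the readability
difference between positional std::get<N> access on the old tuple and named
field access on the new struct:

  #include <cstdint>
  #include <cstdio>
  #include <tuple>
  #include <vector>

  // Hypothetical stand-ins for the graph's CallTy / FuncTy template arguments.
  using CallTy = const char *;
  using FuncTy = int;

  // Old shape: what each element means is visible only at this declaration.
  using TupleInfo = std::tuple<CallTy, std::vector<uint64_t>, const FuncTy *>;

  // New shape: each field is self-describing at every use site.
  struct StructInfo {
    CallTy Call;                    // the callsite being matched
    std::vector<uint64_t> StackIds; // stack ids with context nodes in the graph
    const FuncTy *Func;             // function containing the callsite
  };

  int main() {
    const FuncTy F = 0;
    TupleInfo T{"call", std::vector<uint64_t>{1, 2, 3}, &F};
    StructInfo S{"call", {1, 2, 3}, &F};
    // Index-based access: the reader must remember what element 1 holds.
    std::printf("%zu\n", std::get<1>(T).size());
    // Named access: the intent is clear without looking up the declaration.
    std::printf("%zu\n", S.StackIds.size());
    return 0;
  }
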
---
 .../IPO/MemProfContextDisambiguation.cpp      | 37 +++++++++++++------
 1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index 5c09bb1800cb2b..f1fe205ce4e2c2 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -470,8 +470,20 @@ class CallsiteContextGraph {
 private:
   using EdgeIter = typename std::vector<std::shared_ptr<ContextEdge>>::iterator;
 
-  using CallContextInfo = std::tuple<CallTy, std::vector<uint64_t>,
-                                     const FuncTy *, DenseSet<uint32_t>>;
+  // Structure to keep track of information for each call as we are matching
+  // non-allocation callsites onto context nodes created from the allocation
+  // call metadata / summary contexts.
+  struct CallContextInfo {
+    // The callsite we're trying to match.
+    CallTy Call;
+    // The callsite's stack ids that have a context node in the graph.
+    std::vector<uint64_t> StackIds;
+    // The function containing this callsite.
+    const FuncTy *Func;
+    // Initially empty; if needed, this will be updated to contain the context
+    // ids for use in a new context node created for this callsite.
+    DenseSet<uint32_t> ContextIds;
+  };
 
   /// Assigns the given Node to calls at or inlined into the location with
   /// the Node's stack id, after post order traversing and processing its
@@ -1458,7 +1470,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::updateStackNodes() {
     auto &Calls = It.getSecond();
     // Skip single calls with a single stack id. These don't need a new node.
     if (Calls.size() == 1) {
-      auto &Ids = std::get<1>(Calls[0]);
+      auto &Ids = Calls[0].StackIds;
       if (Ids.size() == 1)
         continue;
     }
@@ -1474,14 +1486,14 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::updateStackNodes() {
     // that to sort by.
     DenseMap<const FuncTy *, unsigned> FuncToIndex;
     for (const auto &[Idx, CallCtxInfo] : enumerate(Calls))
-      FuncToIndex.insert({std::get<2>(CallCtxInfo), Idx});
+      FuncToIndex.insert({CallCtxInfo.Func, Idx});
     std::stable_sort(
         Calls.begin(), Calls.end(),
         [&FuncToIndex](const CallContextInfo &A, const CallContextInfo &B) {
-          auto &IdsA = std::get<1>(A);
-          auto &IdsB = std::get<1>(B);
-          auto *FuncA = std::get<2>(A);
-          auto *FuncB = std::get<2>(B);
+          auto &IdsA = A.StackIds;
+          auto &IdsB = B.StackIds;
+          auto *FuncA = A.Func;
+          auto *FuncB = B.Func;
           return IdsA.size() > IdsB.size() ||
                  (IdsA.size() == IdsB.size() &&
                   (IdsA < IdsB ||
@@ -1520,7 +1532,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::updateStackNodes() {
 #ifndef NDEBUG
       // If this call has a different set of ids than the last one, clear the
       // set used to ensure they are sorted properly.
-      if (I > 0 && Ids != std::get<1>(Calls[I - 1]))
+      if (I > 0 && Ids != Calls[I - 1].StackIds)
         MatchingIdsFuncSet.clear();
       else
         // If the prior call had the same stack ids this set would not be empty.
@@ -1607,17 +1619,18 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::updateStackNodes() {
       // assigned to the same context node, and skip them.
       bool DuplicateContextIds = false;
       for (unsigned J = I + 1; J < Calls.size(); J++) {
-        auto &NextIds = std::get<1>(Calls[J]);
+        auto &CallCtxInfo = Calls[J];
+        auto &NextIds = CallCtxInfo.StackIds;
         if (NextIds != Ids)
           break;
-        auto *NextFunc = std::get<2>(Calls[J]);
+        auto *NextFunc = CallCtxInfo.Func;
         if (NextFunc != Func) {
           // We have another Call with the same ids but that cannot share this
           // node, must duplicate ids for it.
           DuplicateContextIds = true;
           break;
         }
-        auto &NextCall = std::get<0>(Calls[J]);
+        auto &NextCall = CallCtxInfo.Call;
         CallToMatchingCall[NextCall] = Call;
         // Update I so that it gets incremented correctly to skip this call.
         I = J;

From 6aade744992446ae91b490ba34ce3057a4100e70 Mon Sep 17 00:00:00 2001
From: Teresa Johnson <tejohnson at google.com>
Date: Tue, 10 Sep 2024 15:31:01 -0700
Subject: [PATCH 2/2] Address comments

---
 .../Transforms/IPO/MemProfContextDisambiguation.cpp | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index f1fe205ce4e2c2..fa25baee2ba032 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -1490,14 +1490,11 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::updateStackNodes() {
     std::stable_sort(
         Calls.begin(), Calls.end(),
         [&FuncToIndex](const CallContextInfo &A, const CallContextInfo &B) {
-          auto &IdsA = A.StackIds;
-          auto &IdsB = B.StackIds;
-          auto *FuncA = A.Func;
-          auto *FuncB = B.Func;
-          return IdsA.size() > IdsB.size() ||
-                 (IdsA.size() == IdsB.size() &&
-                  (IdsA < IdsB ||
-                   (IdsA == IdsB && FuncToIndex[FuncA] < FuncToIndex[FuncB])));
+          return A.StackIds.size() > B.StackIds.size() ||
+                 (A.StackIds.size() == B.StackIds.size() &&
+                  (A.StackIds < B.StackIds ||
+                   (A.StackIds == B.StackIds &&
+                    FuncToIndex[A.Func] < FuncToIndex[B.Func])));
         });
 
     // Find the node for the last stack id, which should be the same

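For readers skimming the archive, here is a standalone sketch (simplified
types and a made-up FuncToIndex map, not the LLVM code itself) of the ordering
the final comparator above implements: calls with more stack ids sort first,
ties are broken by the stack ids themselves, and identical stack ids fall back
to the original index of the containing function, keeping calls from the same
function adjacent.

  #include <algorithm>
  #include <cstdint>
  #include <cstdio>
  #include <map>
  #include <vector>

  // Simplified stand-in for CallContextInfo: only the fields the sort uses.
  struct Info {
    std::vector<uint64_t> StackIds;
    int Func; // stand-in for const FuncTy *
  };

  int main() {
    // Hypothetical function -> original-index map, built before sorting.
    std::map<int, unsigned> FuncToIndex{{10, 0}, {20, 1}};
    std::vector<Info> Calls = {{{1, 2}, 20}, {{1, 2, 3}, 10}, {{1, 2}, 10}};

    std::stable_sort(Calls.begin(), Calls.end(),
                     [&FuncToIndex](const Info &A, const Info &B) {
                       return A.StackIds.size() > B.StackIds.size() ||
                              (A.StackIds.size() == B.StackIds.size() &&
                               (A.StackIds < B.StackIds ||
                                (A.StackIds == B.StackIds &&
                                 FuncToIndex[A.Func] < FuncToIndex[B.Func])));
                     });

    // Prints the size-3 entry first, then the two size-2 entries with the
    // function of lower original index (10) ahead of the other (20).
    for (const auto &C : Calls)
      std::printf("Func %d: %zu stack ids\n", C.Func, C.StackIds.size());
    return 0;
  }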

