[llvm] [MemProf] Perform cloning for each allocation separately (PR #87112)

via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 29 14:24:15 PDT 2024


llvmbot wrote:



@llvm/pr-subscribers-llvm-transforms

Author: Teresa Johnson (teresajohnson)

<details>
<summary>Changes</summary>

Restructures the cloning slightly to perform all cloning for each
allocation separately. The prior algorithm would sometimes miss cloning
opportunities in cases where trimmed cold contexts partially overlapped
with longer contexts for different allocations.

Most of the change is isolated to the helpers that move edges to new or
existing clones, which now support moving a subset of context ids.
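
To illustrate the shape of that change, here is a small standalone sketch (not the actual helpers in MemProfContextDisambiguation.cpp; it uses plain `std::set` instead of `DenseSet`, and `Edge`, `typeOf`, and the id values are made-up stand-ins) of what moving only the subset of an edge's context ids belonging to the allocation currently being processed looks like: intersect the edge's ids with that allocation's ids, move the subset onto the edge to the clone, and recompute the allocation types on both edges.

```cpp
// Simplified, standalone sketch of moving a subset of context ids from one
// call edge to the edge leading to a callee clone. Not the pass's real code.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <set>

enum AllocType : uint8_t { None = 0, NotCold = 1, Cold = 2 };

// Hypothetical mapping from a context id to its profiled allocation type.
static AllocType typeOf(uint32_t Id) { return Id % 2 ? Cold : NotCold; }

// Recompute the combined allocation type for a set of context ids.
static uint8_t computeAllocType(const std::set<uint32_t> &Ids) {
  uint8_t T = None;
  for (uint32_t Id : Ids)
    T |= typeOf(Id);
  return T;
}

struct Edge {
  std::set<uint32_t> ContextIds; // contexts flowing through this call edge
  uint8_t AllocTypes = None;
};

// Move only IdsToMove from OldEdge onto NewEdge (the edge to the clone),
// leaving the remaining ids behind, and recompute both alloc types.
static void moveSubset(Edge &OldEdge, Edge &NewEdge,
                       const std::set<uint32_t> &IdsToMove) {
  for (uint32_t Id : IdsToMove) {
    OldEdge.ContextIds.erase(Id);
    NewEdge.ContextIds.insert(Id);
  }
  OldEdge.AllocTypes = computeAllocType(OldEdge.ContextIds);
  NewEdge.AllocTypes = computeAllocType(NewEdge.ContextIds);
}

int main() {
  Edge CallerEdge{{1, 2, 3, 4}, computeAllocType({1, 2, 3, 4})};
  Edge EdgeToClone; // edge to the new callee clone

  // Per-allocation cloning: restrict the move to the ids of the allocation
  // currently being processed (the intersection with its context ids).
  std::set<uint32_t> AllocContextIds{1, 3};
  std::set<uint32_t> ToMove;
  std::set_intersection(CallerEdge.ContextIds.begin(),
                        CallerEdge.ContextIds.end(), AllocContextIds.begin(),
                        AllocContextIds.end(),
                        std::inserter(ToMove, ToMove.begin()));
  moveSubset(CallerEdge, EdgeToClone, ToMove);

  std::cout << "ids left on original edge: " << CallerEdge.ContextIds.size()
            << ", ids moved to clone edge: " << EdgeToClone.ContextIds.size()
            << "\n";
  return 0;
}
```

The actual helpers in the patch additionally reuse an existing edge to the new callee if one was created by earlier cloning for a different allocation, and keep the original fast path of moving the whole edge when all of its ids are being moved.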


---

Patch is 20.91 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/87112.diff


2 Files Affected:

- (modified) llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp (+112-32) 
- (added) llvm/test/Transforms/MemProfContextDisambiguation/overlapping-contexts.ll (+232) 


``````````diff
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index 4e4a4999776692..b9d84d583f4957 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -526,25 +526,30 @@ class CallsiteContextGraph {
   /// Create a clone of Edge's callee and move Edge to that new callee node,
   /// performing the necessary context id and allocation type updates.
   /// If callee's caller edge iterator is supplied, it is updated when removing
-  /// the edge from that list.
+  /// the edge from that list. If ContextIdsToMove is non-empty, only that
+  /// subset of Edge's ids are moved to an edge to the new callee.
   ContextNode *
   moveEdgeToNewCalleeClone(const std::shared_ptr<ContextEdge> &Edge,
-                           EdgeIter *CallerEdgeI = nullptr);
+                           EdgeIter *CallerEdgeI = nullptr,
+                           DenseSet<uint32_t> ContextIdsToMove = {});
 
   /// Change the callee of Edge to existing callee clone NewCallee, performing
   /// the necessary context id and allocation type updates.
   /// If callee's caller edge iterator is supplied, it is updated when removing
-  /// the edge from that list.
+  /// the edge from that list. If ContextIdsToMove is non-empty, only that
+  /// subset of Edge's ids are moved to an edge to the new callee.
   void moveEdgeToExistingCalleeClone(const std::shared_ptr<ContextEdge> &Edge,
                                      ContextNode *NewCallee,
                                      EdgeIter *CallerEdgeI = nullptr,
-                                     bool NewClone = false);
+                                     bool NewClone = false,
+                                     DenseSet<uint32_t> ContextIdsToMove = {});
 
   /// Recursively perform cloning on the graph for the given Node and its
   /// callers, in order to uniquely identify the allocation behavior of an
-  /// allocation given its context.
-  void identifyClones(ContextNode *Node,
-                      DenseSet<const ContextNode *> &Visited);
+  /// allocation given its context. The context ids of the allocation being
+  /// processed are given in AllocContextIds.
+  void identifyClones(ContextNode *Node, DenseSet<const ContextNode *> &Visited,
+                      const DenseSet<uint32_t> &AllocContextIds);
 
   /// Map from each context ID to the AllocationType assigned to that context.
   std::map<uint32_t, AllocationType> ContextIdToAllocationType;
@@ -2358,7 +2363,8 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::exportToDot(
 template <typename DerivedCCG, typename FuncTy, typename CallTy>
 typename CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::ContextNode *
 CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::moveEdgeToNewCalleeClone(
-    const std::shared_ptr<ContextEdge> &Edge, EdgeIter *CallerEdgeI) {
+    const std::shared_ptr<ContextEdge> &Edge, EdgeIter *CallerEdgeI,
+    DenseSet<uint32_t> ContextIdsToMove) {
   ContextNode *Node = Edge->Callee;
   NodeOwner.push_back(
       std::make_unique<ContextNode>(Node->IsAllocation, Node->Call));
@@ -2366,7 +2372,8 @@ CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::moveEdgeToNewCalleeClone(
   Node->addClone(Clone);
   assert(NodeToCallingFunc.count(Node));
   NodeToCallingFunc[Clone] = NodeToCallingFunc[Node];
-  moveEdgeToExistingCalleeClone(Edge, Clone, CallerEdgeI, /*NewClone=*/true);
+  moveEdgeToExistingCalleeClone(Edge, Clone, CallerEdgeI, /*NewClone=*/true,
+                                ContextIdsToMove);
   return Clone;
 }
 
@@ -2374,23 +2381,81 @@ template <typename DerivedCCG, typename FuncTy, typename CallTy>
 void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::
     moveEdgeToExistingCalleeClone(const std::shared_ptr<ContextEdge> &Edge,
                                   ContextNode *NewCallee, EdgeIter *CallerEdgeI,
-                                  bool NewClone) {
+                                  bool NewClone,
+                                  DenseSet<uint32_t> ContextIdsToMove) {
   // NewCallee and Edge's current callee must be clones of the same original
   // node (Edge's current callee may be the original node too).
   assert(NewCallee->getOrigNode() == Edge->Callee->getOrigNode());
-  auto &EdgeContextIds = Edge->getContextIds();
+
   ContextNode *OldCallee = Edge->Callee;
-  if (CallerEdgeI)
-    *CallerEdgeI = OldCallee->CallerEdges.erase(*CallerEdgeI);
-  else
-    OldCallee->eraseCallerEdge(Edge.get());
-  Edge->Callee = NewCallee;
-  NewCallee->CallerEdges.push_back(Edge);
-  // Don't need to update Edge's context ids since we are simply reconnecting
-  // it.
-  set_subtract(OldCallee->ContextIds, EdgeContextIds);
-  NewCallee->ContextIds.insert(EdgeContextIds.begin(), EdgeContextIds.end());
-  NewCallee->AllocTypes |= Edge->AllocTypes;
+
+  // We might already have an edge to the new callee from earlier cloning for a
+  // different allocation. If one exists we will reuse it.
+  auto ExistingEdgeToNewCallee = NewCallee->findEdgeFromCaller(Edge->Caller);
+
+  // Callers will pass an empty ContextIdsToMove set when they want to move the
+  // edge. Copy in Edge's ids for simplicity.
+  if (ContextIdsToMove.empty())
+    ContextIdsToMove = Edge->getContextIds();
+
+  // If we are moving all of Edge's ids, then just move the whole Edge.
+  // Otherwise only move the specified subset, to a new edge if needed.
+  if (Edge->getContextIds().size() == ContextIdsToMove.size()) {
+    // Moving the whole Edge.
+    if (CallerEdgeI)
+      *CallerEdgeI = OldCallee->CallerEdges.erase(*CallerEdgeI);
+    else
+      OldCallee->eraseCallerEdge(Edge.get());
+    if (ExistingEdgeToNewCallee) {
+      // Since we already have an edge to NewCallee, simply move the ids
+      // onto it, and remove the existing Edge.
+      ExistingEdgeToNewCallee->getContextIds().insert(ContextIdsToMove.begin(),
+                                                      ContextIdsToMove.end());
+      ExistingEdgeToNewCallee->AllocTypes |= Edge->AllocTypes;
+      assert(Edge->ContextIds == ContextIdsToMove);
+      Edge->ContextIds.clear();
+      Edge->AllocTypes = (uint8_t)AllocationType::None;
+      Edge->Caller->eraseCalleeEdge(Edge.get());
+    } else {
+      // Otherwise just reconnect Edge to NewCallee.
+      Edge->Callee = NewCallee;
+      NewCallee->CallerEdges.push_back(Edge);
+      // Don't need to update Edge's context ids since we are simply
+      // reconnecting it.
+    }
+    // In either case, need to update the alloc types on New Callee.
+    NewCallee->AllocTypes |= Edge->AllocTypes;
+  } else {
+    // Only moving a subset of Edge's ids.
+    if (CallerEdgeI)
+      ++CallerEdgeI;
+    // Compute the alloc type of the subset of ids being moved.
+    auto CallerEdgeAllocType = computeAllocType(ContextIdsToMove);
+    if (ExistingEdgeToNewCallee) {
+      // Since we already have an edge to NewCallee, simply move the ids
+      // onto it.
+      ExistingEdgeToNewCallee->getContextIds().insert(ContextIdsToMove.begin(),
+                                                      ContextIdsToMove.end());
+      ExistingEdgeToNewCallee->AllocTypes |= CallerEdgeAllocType;
+    } else {
+      // Otherwise, create a new edge to NewCallee for the ids being moved.
+      auto NewEdge = std::make_shared<ContextEdge>(
+          NewCallee, Edge->Caller, CallerEdgeAllocType, ContextIdsToMove);
+      Edge->Caller->CalleeEdges.push_back(NewEdge);
+      NewCallee->CallerEdges.push_back(NewEdge);
+    }
+    // In either case, need to update the alloc types on NewCallee, and remove
+    // those ids and update the alloc type on the original Edge.
+    NewCallee->AllocTypes |= CallerEdgeAllocType;
+    set_subtract(Edge->ContextIds, ContextIdsToMove);
+    Edge->AllocTypes = computeAllocType(Edge->ContextIds);
+  }
+  // Now perform some updates that are common to all cases: the NewCallee gets
+  // the moved ids added, and we need to remove those ids from OldCallee and
+  // update its alloc type (NewCallee alloc type updates handled above).
+  NewCallee->ContextIds.insert(ContextIdsToMove.begin(),
+                               ContextIdsToMove.end());
+  set_subtract(OldCallee->ContextIds, ContextIdsToMove);
   OldCallee->AllocTypes = computeAllocType(OldCallee->ContextIds);
   // OldCallee alloc type should be None iff its context id set is now empty.
   assert((OldCallee->AllocTypes == (uint8_t)AllocationType::None) ==
@@ -2402,7 +2467,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::
     // The context ids moving to the new callee are the subset of this edge's
     // context ids and the context ids on the caller edge being moved.
     DenseSet<uint32_t> EdgeContextIdsToMove =
-        set_intersection(OldCalleeEdge->getContextIds(), EdgeContextIds);
+        set_intersection(OldCalleeEdge->getContextIds(), ContextIdsToMove);
     set_subtract(OldCalleeEdge->getContextIds(), EdgeContextIdsToMove);
     OldCalleeEdge->AllocTypes =
         computeAllocType(OldCalleeEdge->getContextIds());
@@ -2468,8 +2533,10 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::
 template <typename DerivedCCG, typename FuncTy, typename CallTy>
 void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones() {
   DenseSet<const ContextNode *> Visited;
-  for (auto &Entry : AllocationCallToContextNodeMap)
-    identifyClones(Entry.second, Visited);
+  for (auto &Entry : AllocationCallToContextNodeMap) {
+    Visited.clear();
+    identifyClones(Entry.second, Visited, Entry.second->ContextIds);
+  }
   Visited.clear();
   for (auto &Entry : AllocationCallToContextNodeMap)
     recursivelyRemoveNoneTypeCalleeEdges(Entry.second, Visited);
@@ -2487,7 +2554,8 @@ bool checkColdOrNotCold(uint8_t AllocType) {
 
 template <typename DerivedCCG, typename FuncTy, typename CallTy>
 void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
-    ContextNode *Node, DenseSet<const ContextNode *> &Visited) {
+    ContextNode *Node, DenseSet<const ContextNode *> &Visited,
+    const DenseSet<uint32_t> &AllocContextIds) {
   if (VerifyNodes)
     checkNode<DerivedCCG, FuncTy, CallTy>(Node, /*CheckEdges=*/false);
   assert(!Node->CloneOf);
@@ -2521,7 +2589,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
       }
       // Ignore any caller we previously visited via another edge.
       if (!Visited.count(Edge->Caller) && !Edge->Caller->CloneOf) {
-        identifyClones(Edge->Caller, Visited);
+        identifyClones(Edge->Caller, Visited, AllocContextIds);
       }
     }
   }
@@ -2584,13 +2652,23 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
     if (hasSingleAllocType(Node->AllocTypes) || Node->CallerEdges.size() <= 1)
       break;
 
+    // Only need to process the ids along this edge pertaining to the given
+    // allocation.
+    auto CallerEdgeContextsForAlloc =
+        set_intersection(CallerEdge->getContextIds(), AllocContextIds);
+    if (CallerEdgeContextsForAlloc.empty()) {
+      ++EI;
+      continue;
+    }
+    auto CallerAllocTypeForAlloc = computeAllocType(CallerEdgeContextsForAlloc);
+
     // Compute the node callee edge alloc types corresponding to the context ids
     // for this caller edge.
     std::vector<uint8_t> CalleeEdgeAllocTypesForCallerEdge;
     CalleeEdgeAllocTypesForCallerEdge.reserve(Node->CalleeEdges.size());
     for (auto &CalleeEdge : Node->CalleeEdges)
       CalleeEdgeAllocTypesForCallerEdge.push_back(intersectAllocTypes(
-          CalleeEdge->getContextIds(), CallerEdge->getContextIds()));
+          CalleeEdge->getContextIds(), CallerEdgeContextsForAlloc));
 
     // Don't clone if doing so will not disambiguate any alloc types amongst
     // caller edges (including the callee edges that would be cloned).
@@ -2605,7 +2683,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
     // disambiguated by splitting out different context ids.
     assert(CallerEdge->AllocTypes != (uint8_t)AllocationType::None);
     assert(Node->AllocTypes != (uint8_t)AllocationType::None);
-    if (allocTypeToUse(CallerEdge->AllocTypes) ==
+    if (allocTypeToUse(CallerAllocTypeForAlloc) ==
             allocTypeToUse(Node->AllocTypes) &&
         allocTypesMatch<DerivedCCG, FuncTy, CallTy>(
             CalleeEdgeAllocTypesForCallerEdge, Node->CalleeEdges)) {
@@ -2618,7 +2696,7 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
     ContextNode *Clone = nullptr;
     for (auto *CurClone : Node->Clones) {
       if (allocTypeToUse(CurClone->AllocTypes) !=
-          allocTypeToUse(CallerEdge->AllocTypes))
+          allocTypeToUse(CallerAllocTypeForAlloc))
         continue;
 
       if (!allocTypesMatch<DerivedCCG, FuncTy, CallTy>(
@@ -2630,9 +2708,11 @@ void CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::identifyClones(
 
     // The edge iterator is adjusted when we move the CallerEdge to the clone.
     if (Clone)
-      moveEdgeToExistingCalleeClone(CallerEdge, Clone, &EI);
+      moveEdgeToExistingCalleeClone(CallerEdge, Clone, &EI, /*NewClone=*/false,
+                                    CallerEdgeContextsForAlloc);
     else
-      Clone = moveEdgeToNewCalleeClone(CallerEdge, &EI);
+      Clone =
+          moveEdgeToNewCalleeClone(CallerEdge, &EI, CallerEdgeContextsForAlloc);
 
     assert(EI == Node->CallerEdges.end() ||
            Node->AllocTypes != (uint8_t)AllocationType::None);
diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/overlapping-contexts.ll b/llvm/test/Transforms/MemProfContextDisambiguation/overlapping-contexts.ll
new file mode 100644
index 00000000000000..7fe9dc96921c6a
--- /dev/null
+++ b/llvm/test/Transforms/MemProfContextDisambiguation/overlapping-contexts.ll
@@ -0,0 +1,232 @@
+;; This test ensures that the logic which assigns calls to stack nodes
+;; correctly handles cloning of a callsite for a trimmed cold context
+;; that partially overlaps with a longer context for a different allocation.
+
+;; The profile data and call stacks were all manually added, but the code
+;; would be structured something like the following (fairly contrived to
+;; result in the type of control flow needed to test):
+
+;; void A(bool b) {
+;;   if (b)
+;;     // cold: stack ids 10, 12, 13, 15 (trimmed ids 19, 21)
+;;     // not cold: stack ids 10, 12, 13, 14 (trimmed id 20)
+;;     new char[10]; // stack id 10
+;;   else
+;;     // not cold: stack ids 11, 12, 13, 15, 16, 17 (trimmed id 22)
+;;     // cold: stack ids 11, 12, 13, 15, 16, 18 (trimmed id 23)
+;;     new char[10]; // stack id 11
+;; }
+;;
+;; void X(bool b) {
+;;   A(b); // stack ids 12
+;; }
+;;
+;; void B(bool b) {
+;;   X(b); // stack id 13
+;; }
+;;
+;; void D() {
+;;   B(true); // stack id 14
+;; }
+;;
+;; void C(bool b) {
+;;   B(b); // stack id 15
+;; }
+;;
+;; void E(bool b) {
+;;   C(b); // stack id 16
+;; }
+;;
+;; void F() {
+;;   E(false); // stack id 17
+;; }
+;;
+;; void G() {
+;;   E(false); // stack id 18
+;; }
+;;
+;; void M() {
+;;   C(true); // stack id 19
+;; }
+;;
+;; int main() {
+;;   D(); // stack id 20 (leads to not cold allocation)
+;;   M(); // stack id 21 (leads to cold allocation)
+;;   F(); // stack id 22 (leads to not cold allocation)
+;;   G(); // stack id 23 (leads to cold allocation)
+;; }
+
+;; -stats requires asserts
+; REQUIRES: asserts
+
+; RUN: opt -passes=memprof-context-disambiguation -supports-hot-cold-new \
+; RUN:	-memprof-verify-ccg -memprof-verify-nodes \
+; RUN:  -stats -pass-remarks=memprof-context-disambiguation \
+; RUN:	%s -S 2>&1 | FileCheck %s --check-prefix=IR \
+; RUN:  --check-prefix=STATS --check-prefix=REMARKS
+
+; REMARKS: created clone _Z1Ab.memprof.1
+; REMARKS: created clone _Z1Xb.memprof.1
+; REMARKS: created clone _Z1Bb.memprof.1
+; REMARKS: created clone _Z1Cb.memprof.1
+; REMARKS: created clone _Z1Eb.memprof.1
+; REMARKS: call in clone _Z1Gv assigned to call function clone _Z1Eb.memprof.1
+; REMARKS: call in clone _Z1Eb.memprof.1 assigned to call function clone _Z1Cb.memprof.1
+;; If we don't perform cloning for each allocation separately, we will miss
+;; cloning _Z1Cb for the trimmed cold allocation context leading to the
+;; allocation at stack id 10.
+; REMARKS: call in clone _Z1Cb.memprof.1 assigned to call function clone _Z1Bb.memprof.1
+; REMARKS: call in clone _Z1Fv assigned to call function clone _Z1Eb
+; REMARKS: call in clone _Z1Eb assigned to call function clone _Z1Cb
+; REMARKS: call in clone _Z1Cb assigned to call function clone _Z1Bb.memprof.1
+; REMARKS: call in clone _Z1Bb.memprof.1 assigned to call function clone _Z1Xb.memprof.1
+; REMARKS: call in clone _Z1Xb.memprof.1 assigned to call function clone _Z1Ab.memprof.1
+; REMARKS: call in clone _Z1Ab.memprof.1 marked with memprof allocation attribute cold
+; REMARKS: call in clone _Z1Bb.memprof.1 assigned to call function clone _Z1Xb
+; REMARKS: call in clone _Z1Dv assigned to call function clone _Z1Bb
+; REMARKS: call in clone _Z1Bb assigned to call function clone _Z1Xb
+; REMARKS: call in clone _Z1Xb assigned to call function clone _Z1Ab
+; REMARKS: call in clone _Z1Ab marked with memprof allocation attribute notcold
+; REMARKS: call in clone _Z1Ab.memprof.1 marked with memprof allocation attribute cold
+; REMARKS: call in clone _Z1Ab marked with memprof allocation attribute notcold
+
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define dso_local void @_Z1Ab(i1 noundef zeroext %b) {
+entry:
+  br i1 %b, label %if.then, label %if.else
+
+if.then:
+  %call = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #7, !memprof !0, !callsite !10
+  br label %if.end
+
+if.else:
+  %call2 = call noalias noundef nonnull ptr @_Znam(i64 noundef 10) #7, !memprof !5, !callsite !11
+  br label %if.end
+
+if.end:
+  ret void
+}
+
+; Function Attrs: nobuiltin
+declare ptr @_Znam(i64) #0
+
+define dso_local void @_Z1Xb(i1 noundef zeroext %b) {
+entry:
+  tail call void @_Z1Ab(i1 noundef zeroext %b), !callsite !12
+  ret void
+}
+
+define dso_local void @_Z1Bb(i1 noundef zeroext %b) {
+entry:
+  tail call void @_Z1Xb(i1 noundef zeroext %b), !callsite !13
+  ret void
+}
+
+define dso_local void @_Z1Dv() {
+entry:
+  tail call void @_Z1Bb(i1 noundef zeroext true), !callsite !14
+  ret void
+}
+
+define dso_local void @_Z1Cb(i1 noundef zeroext %b) {
+entry:
+  tail call void @_Z1Bb(i1 noundef zeroext %b), !callsite !15
+  ret void
+}
+
+define dso_local void @_Z1Eb(i1 noundef zeroext %b) {
+entry:
+  tail call void @_Z1Cb(i1 noundef zeroext %b), !callsite !16
+  ret void
+}
+
+define dso_local void @_Z1Fv() {
+entry:
+  tail call void @_Z1Eb(i1 noundef zeroext false), !callsite !17
+  ret void
+}
+
+define dso_local void @_Z1Gv() {
+entry:
+  tail call void @_Z1Eb(i1 noundef zeroext false), !callsite !18
+  ret void
+}
+
+define dso_local void @_Z1Mv() {
+entry:
+  tail call void @_Z1Cb(i1 noundef zeroext true), !callsite !19
+  ret void
+}
+
+define dso_local noundef i32 @main() local_unnamed_addr {
+entry:
+  tail call void @_Z1Dv(), !callsite !20 ;; Not cold context
+  tail call void @_Z1Mv(), !callsite !21 ;; Cold context
+  tail call void @_Z1Fv(), !callsite !22 ;; Not cold context
+  tail call void @_Z1Gv(), !callsite !23 ;; Cold context
+  ret i32 0
+}
+
+attributes #0 = { nobuiltin }
+attributes #7 = { builtin }
+
+!0 = !{!1, !3}
+;; Cold (trimmed) context via call to _Z1Mv in main
+!1 = !{!2, !"cold"}
+!2 = !{i64 10, i64 12, i64 13, i64 15}
+;; Not cold (trimmed) context via call to _Z1Dv in main
+!3 = !{!4, !"notcold"}
+!4 = !{i64 10, i64 12, i64 13, i64 14}
+!5 = !{!6, !8}
+;; Not cold (trimmed) context via call to _Z1Fv in main
+!6 = !{!7, !"notcold"}
+!7 = !{i64 11, i64 12, i64 13, i64 15, i64 16, i64 17}
+;; Cold (trimmed) context via call to _Z1Gv in main
+!8 = !{!9, !"cold"}
+!9 = !{i64 11, i64 12, i64 13, i64 15, i64 16, i64 18}
+!10 = !{i64 10}
+!11 = !{i64 11}
+!12 = !{i64 12}
+!13 = !{i64 13}
+!14 = !{i64 14}
+!15 = !{i64 15}
+!16 = !{i64 16}
+!17 = !{i64 17}
+!18 = !{i64 18}
+!19 = !{i64 19}
+!20 = !{i64 20}
+!21 = !{i64 21}
+!22 = !{i64 22}
+!23 = !{i64 23}
+
+; IR: define {{.*}} @_Z1Cb(i1 noundef zeroext %b)
+; IR-NEXT: entry:
+; IR-NEXT:   call {{.*}} @_Z1Bb.memprof.1(i1 noundef zeroext %b)
+
+; IR: define {{.*}} @_Z1Ab.memprof.1(i1 noundef zeroext %b)
+; IR-NEXT: entry:
+; IR-NEXT:...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/87112

