[llvm] [Attributor] New attribute to identify what byte ranges are alive for an allocation (PR #66148)

Vidhush Singhal via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 20 12:54:54 PDT 2023


https://github.com/vidsinghal updated https://github.com/llvm/llvm-project/pull/66148

From 8758c5820ef6aadd90e1c5e794ca95f91e57ed52 Mon Sep 17 00:00:00 2001
From: vidsinghal <vidush.sl@gmail.com>
Date: Fri, 4 Aug 2023 00:06:56 -0400
Subject: [PATCH] [Attributor] New attribute to identify what byte ranges are
 alive for an allocation

Changes the size of allocations automatically, based on which byte ranges are known to be alive.
Only implements the case where a single live range starts at offset 0 of the allocation.
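
As a sketch of the intended transformation (illustrative IR only, modeled on the
positive_alloca_1 test added below; @use stands in for any call that consumes the
loaded value): when AAPointerInfo shows that only the first 4 bytes of a 12-byte
struct allocation are ever accessed, the alloca is shrunk to that live range.

%struct.Foo = type { i32, i32, i8 }

declare void @use(i32)

define void @example(i32 %val) {
entry:
  ; Only the leading i32 field of the 12-byte %struct.Foo is accessed.
  %f = alloca %struct.Foo, align 4
  store i32 %val, ptr %f, align 4
  %v = load i32, ptr %f, align 4
  call void @use(i32 %v)
  ret void
}

; Expected effect of manifest(): the allocation covers only the live range
; starting at offset 0, e.g.
;   %f = alloca i8, i32 4, align 4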

Differential Revision: https://reviews.llvm.org/D157068
---
 llvm/include/llvm/Transforms/IPO/Attributor.h |  43 ++
 llvm/lib/Transforms/IPO/Attributor.cpp        |  15 +-
 .../Transforms/IPO/AttributorAttributes.cpp   | 249 +++++++++-
 llvm/test/Transforms/Attributor/allocator.ll  | 432 ++++++++++++++++++
 llvm/test/Transforms/Attributor/depgraph.ll   | 294 ------------
 llvm/test/Transforms/Attributor/nodelete.ll   |   1 +
 6 files changed, 730 insertions(+), 304 deletions(-)
 create mode 100644 llvm/test/Transforms/Attributor/allocator.ll

diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index bd1bd8261123e51..2fc46172c922a8e 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -103,6 +103,7 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetOperations.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/iterator.h"
 #include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/Analysis/CFG.h"
@@ -132,6 +133,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/ModRef.h"
 #include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/TypeSize.h"
 #include "llvm/TargetParser/Triple.h"
 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
 
@@ -6117,6 +6119,12 @@ struct AAPointerInfo : public AbstractAttribute {
   /// See AbstractAttribute::getIdAddr()
   const char *getIdAddr() const override { return &ID; }
 
+  using OffsetBinsTy = DenseMap<AA::RangeTy, SmallSet<unsigned, 4>>;
+  using const_bin_iterator = OffsetBinsTy::const_iterator;
+  virtual const_bin_iterator begin() const = 0;
+  virtual const_bin_iterator end() const = 0;
+  virtual int64_t numOffsetBins() const = 0;
+
   /// Call \p CB on all accesses that might interfere with \p Range and return
   /// true if all such accesses were known and the callback returned true for
   /// all of them, false otherwise. An access interferes with an offset-size
@@ -6270,6 +6278,41 @@ struct AAAddressSpace : public StateWrapper<BooleanState, AbstractAttribute> {
   static const char ID;
 };
 
+struct AAAllocationInfo : public StateWrapper<BooleanState, AbstractAttribute> {
+  AAAllocationInfo(const IRPosition &IRP, Attributor &A)
+      : StateWrapper<BooleanState, AbstractAttribute>(IRP) {}
+
+  /// See AbstractAttribute::isValidIRPositionForInit
+  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
+    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
+      return false;
+    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
+  }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAAllocationInfo &createForPosition(const IRPosition &IRP,
+                                             Attributor &A);
+
+  virtual std::optional<TypeSize> getAllocatedSize() const = 0;
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAAllocationInfo"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAAllocationInfo
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  constexpr static const std::optional<TypeSize> HasNoAllocationSize =
+      std::optional<TypeSize>(TypeSize(-1, true));
+
+  static const char ID;
+};
+
 /// An abstract interface for llvm::GlobalValue information interference.
 struct AAGlobalValueInfo
     : public StateWrapper<BooleanState, AbstractAttribute> {
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
index 1ffafc65ba63a4f..77f5dff3cbd7ff2 100644
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -3610,14 +3610,13 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
   };
 
   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
-  bool Success;
+  [[maybe_unused]] bool Success;
   bool UsedAssumedInformation = false;
   Success = checkForAllInstructionsImpl(
       nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call},
       UsedAssumedInformation);
-  (void)Success;
   assert(Success && "Expected the check call to be successful!");
 
   auto LoadStorePred = [&](Instruction &I) -> bool {
@@ -3643,7 +3642,17 @@ void Attributor::identifyDefaultAbstractAttributes(Function &F) {
       nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
       {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
       UsedAssumedInformation);
-  (void)Success;
+  assert(Success && "Expected the check call to be successful!");
+
+  // Create an AAAllocationInfo abstract attribute for every alloca instruction.
+  auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
+    getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
+    return true;
+  };
+
+  Success = checkForAllInstructionsImpl(
+      nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
+      {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
   assert(Success && "Expected the check call to be successful!");
 }
 
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 03b5dc3899ac8f8..813ab99b097837c 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -64,7 +64,9 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/JSON.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TypeSize.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/CallPromotionUtils.h"
@@ -192,6 +194,7 @@ PIPE_OPERATOR(AAPointerInfo)
 PIPE_OPERATOR(AAAssumptionInfo)
 PIPE_OPERATOR(AAUnderlyingObjects)
 PIPE_OPERATOR(AAAddressSpace)
+PIPE_OPERATOR(AAAllocationInfo)
 PIPE_OPERATOR(AAIndirectCallInfo)
 PIPE_OPERATOR(AAGlobalValueInfo)
 PIPE_OPERATOR(AADenormalFPMath)
@@ -881,11 +884,9 @@ struct AA::PointerInfo::State : public AbstractState {
                          AAPointerInfo::AccessKind Kind, Type *Ty,
                          Instruction *RemoteI = nullptr);
 
-  using OffsetBinsTy = DenseMap<RangeTy, SmallSet<unsigned, 4>>;
-
-  using const_bin_iterator = OffsetBinsTy::const_iterator;
-  const_bin_iterator begin() const { return OffsetBins.begin(); }
-  const_bin_iterator end() const { return OffsetBins.end(); }
+  AAPointerInfo::const_bin_iterator begin() const { return OffsetBins.begin(); }
+  AAPointerInfo::const_bin_iterator end() const { return OffsetBins.end(); }
+  int64_t numOffsetBins() const { return OffsetBins.size(); }
 
   const AAPointerInfo::Access &getAccess(unsigned Index) const {
     return AccessList[Index];
@@ -905,7 +906,7 @@ struct AA::PointerInfo::State : public AbstractState {
   // are all combined into a single Access object. This may result in loss of
   // information in RangeTy in the Access object.
   SmallVector<AAPointerInfo::Access> AccessList;
-  OffsetBinsTy OffsetBins;
+  AAPointerInfo::OffsetBinsTy OffsetBins;
   DenseMap<const Instruction *, SmallVector<unsigned>> RemoteIMap;
 
   /// See AAPointerInfo::forallInterferingAccesses.
@@ -1109,6 +1110,12 @@ struct AAPointerInfoImpl
     return AAPointerInfo::manifest(A);
   }
 
+  virtual const_bin_iterator begin() const override { return State::begin(); }
+  virtual const_bin_iterator end() const override { return State::end(); }
+  virtual int64_t numOffsetBins() const override {
+    return State::numOffsetBins();
+  }
+
   bool forallInterferingAccesses(
       AA::RangeTy Range,
       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
@@ -6505,7 +6512,7 @@ struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
 
   /// See AbstractAttribute::updateImpl(...).
   ChangeStatus updateImpl(Attributor &A) override {
-        return indicatePessimisticFixpoint();
+    return indicatePessimisticFixpoint();
   }
 
   void trackStatistics() const override {
@@ -12658,6 +12665,232 @@ struct AAAddressSpaceCallSiteArgument final : AAAddressSpaceImpl {
 };
 } // namespace
 
+/// ----------- Allocation Info ----------
+namespace {
+struct AAAllocationInfoImpl : public AAAllocationInfo {
+  AAAllocationInfoImpl(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfo(IRP, A) {}
+
+  std::optional<TypeSize> getAllocatedSize() const override {
+    assert(isValidState() && "the AA is invalid");
+    return AssumedAllocatedSize;
+  }
+
+  ChangeStatus updateImpl(Attributor &A) override {
+
+    const IRPosition &IRP = getIRPosition();
+    Instruction *I = IRP.getCtxI();
+
+    if (!isa<AllocaInst>(I))
+      return indicatePessimisticFixpoint();
+
+    bool IsKnownNoCapture;
+    if (!AA::hasAssumedIRAttr<Attribute::NoCapture>(
+            A, this, IRP, DepClassTy::OPTIONAL, IsKnownNoCapture))
+      return indicatePessimisticFixpoint();
+
+    if (IsKnownNoCapture)
+      return indicatePessimisticFixpoint();
+
+    const AAPointerInfo *PI =
+        A.getOrCreateAAFor<AAPointerInfo>(IRP, *this, DepClassTy::REQUIRED);
+
+    if (!PI)
+      return indicatePessimisticFixpoint();
+
+    if (!PI->getState().isValidState())
+      return indicatePessimisticFixpoint();
+
+    int64_t BinSize = PI->numOffsetBins();
+    switch (BinSize) {
+    case 0: {
+      switch (I->getOpcode()) {
+      /*TODO: add case for malloc-like calls*/
+      case Instruction::Alloca: {
+        AllocaInst *AI = cast<AllocaInst>(I);
+        const DataLayout &DL = A.getDataLayout();
+        const auto AllocationSize = AI->getAllocationSize(DL);
+
+        if (!AllocationSize || *AllocationSize == 0)
+          return indicatePessimisticFixpoint();
+
+        break;
+      }
+      default:
+        return indicatePessimisticFixpoint();
+      }
+
+      auto NewAllocationSize = std::optional<TypeSize>(TypeSize(0, false));
+
+      if (!changeAllocationSize(NewAllocationSize))
+        return ChangeStatus::UNCHANGED;
+      break;
+    }
+    case 1: {
+      const auto &It = PI->begin();
+      if (It->first.Offset == 0) {
+
+        uint64_t SizeOfBin = It->first.Offset + It->first.Size;
+        const DataLayout &DL = A.getDataLayout();
+        /*TODO: add case for malloc-like calls*/
+        switch (I->getOpcode()) {
+        case Instruction::Alloca: {
+          AllocaInst *AI = cast<AllocaInst>(I);
+          const auto AllocationSize = AI->getAllocationSize(DL);
+
+          if (!AllocationSize || *AllocationSize == 0)
+            return indicatePessimisticFixpoint();
+
+          if (SizeOfBin == *AllocationSize)
+            return indicatePessimisticFixpoint();
+
+          break;
+        }
+        default:
+          return indicatePessimisticFixpoint();
+        }
+
+        auto NewAllocationSize =
+            std::optional<TypeSize>(TypeSize(SizeOfBin * 8, false));
+
+        if (!changeAllocationSize(NewAllocationSize))
+          return ChangeStatus::UNCHANGED;
+      } else
+        /*TODO: when access does not start at the 0th byte of the bin*/
+        return indicatePessimisticFixpoint();
+      break;
+    }
+    default: {
+      /*TODO: Handle for multiple Bins*/
+      return indicatePessimisticFixpoint();
+    }
+    }
+
+    return ChangeStatus::CHANGED;
+  }
+
+  /// See AbstractAttribute::manifest(...).
+  ChangeStatus manifest(Attributor &A) override {
+
+    assert(isValidState() &&
+           "Manifest should only be called if the state is valid.");
+
+    Instruction *I = getIRPosition().getCtxI();
+
+    auto FixedAllocatedSizeInBits = getAllocatedSize()->getFixedValue();
+
+    int NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
+
+    Type *CharType = Type::getInt8Ty(I->getContext());
+
+    auto *NumBytesToValue = llvm::ConstantInt::get(
+        I->getContext(), llvm::APInt(32, NumBytesToAllocate));
+
+    switch (I->getOpcode()) {
+    /*TODO: add case for malloc-like calls*/
+    case Instruction::Alloca: {
+
+      AllocaInst *AI = cast<AllocaInst>(I);
+
+      AllocaInst *NewAllocaInst =
+          new AllocaInst(CharType, AI->getAddressSpace(), NumBytesToValue,
+                         AI->getAlign(), AI->getName(), AI->getNextNode());
+
+      if (A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst))
+        return ChangeStatus::CHANGED;
+
+      break;
+    }
+    default:
+      break;
+    }
+
+    return llvm::ChangeStatus::UNCHANGED;
+  }
+
+  /// See AbstractAttribute::getAsStr().
+  const std::string getAsStr(Attributor *A) const override {
+    if (!isValidState())
+      return "allocationinfo(<invalid>)";
+    return "allocationinfo(" +
+           (AssumedAllocatedSize == HasNoAllocationSize
+                ? "none"
+                : std::to_string(AssumedAllocatedSize->getFixedValue())) +
+           ")";
+  }
+
+private:
+  std::optional<TypeSize> AssumedAllocatedSize = HasNoAllocationSize;
+
+  bool changeAllocationSize(std::optional<TypeSize> Size) {
+    if (AssumedAllocatedSize == HasNoAllocationSize ||
+        AssumedAllocatedSize != Size) {
+      AssumedAllocatedSize = Size;
+      return true;
+    }
+    return false;
+  }
+};
+
+struct AAAllocationInfoFloating : AAAllocationInfoImpl {
+  AAAllocationInfoFloating(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_FLOATING_ATTR(allocationinfo);
+  }
+};
+
+struct AAAllocationInfoReturned : AAAllocationInfoImpl {
+  AAAllocationInfoReturned(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  /// See AbstractAttribute::initialize(...).
+  void initialize(Attributor &A) override {
+    // TODO: We do not rewrite function arguments for now because that would
+    // require rewriting the function signature and all call sites.
+    (void)indicatePessimisticFixpoint();
+  }
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_FNRET_ATTR(allocationinfo);
+  }
+};
+
+struct AAAllocationInfoCallSiteReturned : AAAllocationInfoImpl {
+  AAAllocationInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_CSRET_ATTR(allocationinfo);
+  }
+};
+
+struct AAAllocationInfoArgument : AAAllocationInfoImpl {
+  AAAllocationInfoArgument(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_ARG_ATTR(allocationinfo);
+  }
+};
+
+struct AAAllocationInfoCallSiteArgument : AAAllocationInfoImpl {
+  AAAllocationInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  /// See AbstractAttribute::initialize(...).
+  void initialize(Attributor &A) override {
+
+    (void)indicatePessimisticFixpoint();
+  }
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_CSARG_ATTR(allocationinfo);
+  }
+};
+} // namespace
+
 const char AANoUnwind::ID = 0;
 const char AANoSync::ID = 0;
 const char AANoFree::ID = 0;
@@ -12691,6 +12924,7 @@ const char AAPointerInfo::ID = 0;
 const char AAAssumptionInfo::ID = 0;
 const char AAUnderlyingObjects::ID = 0;
 const char AAAddressSpace::ID = 0;
+const char AAAllocationInfo::ID = 0;
 const char AAIndirectCallInfo::ID = 0;
 const char AAGlobalValueInfo::ID = 0;
 const char AADenormalFPMath::ID = 0;
@@ -12824,6 +13058,7 @@ CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFPClass)
 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAddressSpace)
+CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAllocationInfo)
 
 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
diff --git a/llvm/test/Transforms/Attributor/allocator.ll b/llvm/test/Transforms/Attributor/allocator.ll
new file mode 100644
index 000000000000000..1a1b1fbb5eed5ca
--- /dev/null
+++ b/llvm/test/Transforms/Attributor/allocator.ll
@@ -0,0 +1,432 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals --version 2
+; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-annotate-decl-cs  -S < %s | FileCheck %s --check-prefixes=CHECK,TUNIT
+; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,CGSCC
+
+%struct.Foo = type { i32, i32, i8 }
+
+@.str = private unnamed_addr constant [17 x i8] c"The value is %d\0A\00", align 1
+
+;.
+; CHECK: @[[_STR:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [17 x i8] c"The value is %d\0A\00", align 1
+;.
+define dso_local void @positive_alloca_1(i32 noundef %val) #0 {
+; CHECK-LABEL: define dso_local void @positive_alloca_1
+; CHECK-SAME: (i32 noundef [[VAL:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VAL_ADDR1:%.*]] = alloca i8, i32 4, align 4
+; CHECK-NEXT:    [[F2:%.*]] = alloca i8, i32 4, align 4
+; CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR1]], align 4
+; CHECK-NEXT:    store i32 10, ptr [[F2]], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[F2]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[F2]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[F2]], align 4
+; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP1]], [[VAL]]
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[ADD3]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %val.addr = alloca i64, align 4
+  %f = alloca %struct.Foo, align 4
+  store i32 %val, ptr %val.addr, align 4
+  %field1 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0
+  store i32 10, ptr %field1, align 4
+  %field11 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0
+  %0 = load i32, ptr %field11, align 4
+  %add = add nsw i32 %0, 1
+  store i32 %add, ptr %field11, align 4
+  %field12 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0
+  %1 = load i32, ptr %field12, align 4
+  %2 = load i32, ptr %val.addr, align 4
+  %add3 = add nsw i32 %1, %2
+  %call = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %add3)
+  ret void
+}
+
+; TODO: change the malloc-like call as well
+; Function Attrs: noinline nounwind uwtable
+define dso_local void @positive_malloc_1(ptr noundef %val) #0 {
+; CHECK-LABEL: define dso_local void @positive_malloc_1
+; CHECK-SAME: (ptr nocapture nofree noundef readonly [[VAL:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    [[F:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    store ptr [[VAL]], ptr [[VAL_ADDR]], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef 12)
+; CHECK-NEXT:    store ptr [[CALL]], ptr [[F]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[VAL]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP0]], 10
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[CALL]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
+; CHECK-NEXT:    [[CALL2:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP1]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %val.addr = alloca ptr, align 8
+  %f = alloca ptr, align 8
+  store ptr %val, ptr %val.addr, align 8
+  %call = call noalias ptr @malloc(i64 noundef 12) #3
+  store ptr %call, ptr %f, align 8
+  %0 = load ptr, ptr %val.addr, align 8
+  %1 = load i32, ptr %0, align 4
+  %add = add nsw i32 %1, 10
+  %2 = load ptr, ptr %f, align 8
+  %a = getelementptr inbounds %struct.Foo, ptr %2, i32 0, i32 0
+  store i32 %add, ptr %a, align 4
+  %3 = load ptr, ptr %f, align 8
+  %a1 = getelementptr inbounds %struct.Foo, ptr %3, i32 0, i32 0
+  %4 = load i32, ptr %a1, align 4
+  %call2 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %4)
+  ret void
+}
+
+; TODO: change the malloc-like call as well
+; Function Attrs: noinline nounwind uwtable
+define dso_local void @positive_malloc_2(ptr noundef %val) #0 {
+; CHECK-LABEL: define dso_local void @positive_malloc_2
+; CHECK-SAME: (ptr nocapture nofree noundef readonly [[VAL:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    [[F:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    store ptr [[VAL]], ptr [[VAL_ADDR]], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef 60)
+; CHECK-NEXT:    store ptr [[CALL]], ptr [[F]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[VAL]], align 4
+; CHECK-NEXT:    store i32 [[TMP0]], ptr [[CALL]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
+; CHECK-NEXT:    [[CALL2:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP1]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %val.addr = alloca ptr, align 8
+  %x = alloca i32, align 4
+  %f = alloca ptr, align 8
+  store ptr %val, ptr %val.addr, align 8
+  store i32 15, ptr %x, align 4
+  %0 = load i32, ptr %x, align 4
+  %conv = sext i32 %0 to i64
+  %mul = mul i64 4, %conv
+  %call = call noalias ptr @malloc(i64 noundef %mul)
+  store ptr %call, ptr %f, align 8
+  %1 = load ptr, ptr %val.addr, align 8
+  %2 = load i32, ptr %1, align 4
+  %3 = load ptr, ptr %f, align 8
+  %arrayidx = getelementptr inbounds i32, ptr %3, i64 0
+  store i32 %2, ptr %arrayidx, align 4
+  %4 = load ptr, ptr %f, align 8
+  %arrayidx1 = getelementptr inbounds i32, ptr %4, i64 0
+  %5 = load i32, ptr %arrayidx1, align 4
+  %call2 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %5)
+  ret void
+}
+
+; Function Attrs: noinline nounwind uwtable
+define dso_local ptr @negative_test_escaping_pointer(i32 noundef %val) #0 {
+; CHECK-LABEL: define dso_local ptr @negative_test_escaping_pointer
+; CHECK-SAME: (i32 noundef [[VAL:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[F:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR]], align 4
+; CHECK-NEXT:    [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef 16)
+; CHECK-NEXT:    store ptr [[CALL]], ptr [[F]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[F]], align 8
+; CHECK-NEXT:    store i32 2, ptr [[TMP0]], align 8
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 10, [[VAL]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[F]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 8
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP2]], [[ADD]]
+; CHECK-NEXT:    store i32 [[ADD2]], ptr [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[F]], align 8
+; CHECK-NEXT:    ret ptr [[TMP3]]
+;
+entry:
+  %val.addr = alloca i32, align 4
+  %f = alloca ptr, align 8
+  store i32 %val, ptr %val.addr, align 4
+  %call = call noalias ptr @malloc(i64 noundef 16) #2
+  store ptr %call, ptr %f, align 8
+  %0 = load ptr, ptr %f, align 8
+  %field1 = getelementptr inbounds %struct.Foo, ptr %0, i32 0, i32 0
+  store i32 2, ptr %field1, align 8
+  %1 = load i32, ptr %val.addr, align 4
+  %add = add nsw i32 10, %1
+  %2 = load ptr, ptr %f, align 8
+  %field11 = getelementptr inbounds %struct.Foo, ptr %2, i32 0, i32 0
+  %3 = load i32, ptr %field11, align 8
+  %add2 = add nsw i32 %3, %add
+  store i32 %add2, ptr %field11, align 8
+  %4 = load ptr, ptr %f, align 8
+  ret ptr %4
+}
+
+
+; TODO: The allocation can be reduced here.
+; However, the offsets (load/store, etc.) need to be changed as well.
+; Function Attrs: noinline nounwind uwtable
+define dso_local { i64, ptr } @positive_test_not_a_single_start_offset(i32 noundef %val) #0 {
+; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none)
+; CHECK-LABEL: define dso_local { i64, ptr } @positive_test_not_a_single_start_offset
+; CHECK-SAME: (i32 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[RETVAL:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8
+; CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store i32 [[VAL]], ptr [[VAL_ADDR]], align 4
+; CHECK-NEXT:    store i32 2, ptr [[RETVAL]], align 8
+; CHECK-NEXT:    [[FIELD3:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[RETVAL]], i32 0, i32 2
+; CHECK-NEXT:    store ptr [[VAL_ADDR]], ptr [[FIELD3]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load { i64, ptr }, ptr [[RETVAL]], align 8
+; CHECK-NEXT:    ret { i64, ptr } [[TMP0]]
+;
+entry:
+  %retval = alloca %struct.Foo, align 8
+  %val.addr = alloca i32, align 4
+  store i32 %val, ptr %val.addr, align 4
+  %field1 = getelementptr inbounds %struct.Foo, ptr %retval, i32 0, i32 0
+  store i32 2, ptr %field1, align 8
+  %field3 = getelementptr inbounds %struct.Foo, ptr %retval, i32 0, i32 2
+  store ptr %val.addr, ptr %field3, align 8
+  %0 = load { i64, ptr }, ptr %retval, align 8
+  ret { i64, ptr } %0
+}
+
+; Function Attrs: noinline nounwind uwtable
+define dso_local void @positive_test_reduce_array_allocation_1() {
+; CHECK-LABEL: define dso_local void @positive_test_reduce_array_allocation_1() {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAY1:%.*]] = alloca i8, i32 4, align 8
+; CHECK-NEXT:    store i32 0, ptr [[ARRAY1]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAY1]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[TMP0]], 2
+; CHECK-NEXT:    store i32 [[TMP1]], ptr [[ARRAY1]], align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = add i32 1, 2
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ARRAY1]], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = add i32 [[TMP2]], [[TMP3]]
+; CHECK-NEXT:    store i32 [[TMP4]], ptr [[ARRAY1]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ARRAY1]], align 8
+; CHECK-NEXT:    [[CALL:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP5]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %array = alloca ptr, i32 10
+  store i32 0, ptr %array
+  %0 = load i32, ptr %array
+  %1 = add i32 %0, 2
+  store i32 %1, ptr %array
+  %2 = add i32 1, 2
+  %3 = load i32, ptr %array
+  %4 = add i32 %2, %3
+  store i32 %4, ptr %array
+  %5 = load i32, ptr %array
+  %call = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %5)
+  ret void
+}
+
+
+; Function Attrs: noinline nounwind uwtable
+; TODO: Here the array size is not known at compile time.
+; However, the array does not escape and is only partially used.
+; Should the optimization reduce the allocation size regardless, based on AAPointerInfo?
+define dso_local void @baz(ptr noundef %val, i32 noundef %arrayLength) #0 {
+; CHECK-LABEL: define dso_local void @baz
+; CHECK-SAME: (ptr nocapture nofree noundef readonly [[VAL:%.*]], i32 noundef [[ARRAYLENGTH:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[VAL_ADDR:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    [[ARRAYLENGTH_ADDR:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[F:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    store ptr [[VAL]], ptr [[VAL_ADDR]], align 8
+; CHECK-NEXT:    store i32 [[ARRAYLENGTH]], ptr [[ARRAYLENGTH_ADDR]], align 4
+; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[ARRAYLENGTH]] to i64
+; CHECK-NEXT:    [[MUL:%.*]] = mul i64 4, [[CONV]]
+; CHECK-NEXT:    [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef [[MUL]])
+; CHECK-NEXT:    store ptr [[CALL]], ptr [[F]], align 8
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[VAL]], align 4
+; CHECK-NEXT:    store i32 [[TMP0]], ptr [[CALL]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4
+; CHECK-NEXT:    [[CALL2:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP1]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %val.addr = alloca ptr, align 8
+  %arrayLength.addr = alloca i32, align 4
+  %f = alloca ptr, align 8
+  store ptr %val, ptr %val.addr, align 8
+  store i32 %arrayLength, ptr %arrayLength.addr, align 4
+  %0 = load i32, ptr %arrayLength.addr, align 4
+  %conv = sext i32 %0 to i64
+  %mul = mul i64 4, %conv
+  %call = call noalias ptr @malloc(i64 noundef %mul) #3
+  store ptr %call, ptr %f, align 8
+  %1 = load ptr, ptr %val.addr, align 8
+  %2 = load i32, ptr %1, align 4
+  %3 = load ptr, ptr %f, align 8
+  %arrayidx = getelementptr inbounds i32, ptr %3, i64 0
+  store i32 %2, ptr %arrayidx, align 4
+  %4 = load ptr, ptr %f, align 8
+  %arrayidx1 = getelementptr inbounds i32, ptr %4, i64 0
+  %5 = load i32, ptr %arrayidx1, align 4
+  %call2 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %5)
+  ret void
+}
+
+; TODO: Since only even indexes of the array are part of the output,
+; we can reduce the allocation by half and make the array accesses contiguous.
+; Function Attrs: noinline nounwind uwtable
+define dso_local void @positive_test_reduce_array_allocation_2() #0 {
+; CHECK-LABEL: define dso_local void @positive_test_reduce_array_allocation_2() {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAY:%.*]] = alloca ptr, align 8
+; CHECK-NEXT:    [[I:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef 40000)
+; CHECK-NEXT:    store ptr [[CALL]], ptr [[ARRAY]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
+; CHECK-NEXT:    br label [[FOR_COND:%.*]]
+; CHECK:       for.cond:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10000
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM]]
+; CHECK-NEXT:    store i32 [[TMP1]], ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    br label [[FOR_INC:%.*]]
+; CHECK:       for.inc:
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP3]], 2
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[I]], align 4
+; CHECK-NEXT:    br label [[FOR_COND]]
+; CHECK:       for.end:
+; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
+; CHECK-NEXT:    br label [[FOR_COND1:%.*]]
+; CHECK:       for.cond1:
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[CMP2:%.*]] = icmp slt i32 [[TMP4]], 10000
+; CHECK-NEXT:    br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END9:%.*]]
+; CHECK:       for.body3:
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[IDXPROM4:%.*]] = sext i32 [[TMP5]] to i64
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
+; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP6]], 1
+; CHECK-NEXT:    store i32 [[ADD6]], ptr [[ARRAYIDX5]], align 4
+; CHECK-NEXT:    br label [[FOR_INC7:%.*]]
+; CHECK:       for.inc7:
+; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP7]], 2
+; CHECK-NEXT:    store i32 [[ADD8]], ptr [[I]], align 4
+; CHECK-NEXT:    br label [[FOR_COND1]]
+; CHECK:       for.end9:
+; CHECK-NEXT:    store i32 0, ptr [[I]], align 4
+; CHECK-NEXT:    br label [[FOR_COND10:%.*]]
+; CHECK:       for.cond10:
+; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[CMP11:%.*]] = icmp slt i32 [[TMP8]], 10000
+; CHECK-NEXT:    br i1 [[CMP11]], label [[FOR_BODY12:%.*]], label [[FOR_END18:%.*]]
+; CHECK:       for.body12:
+; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[IDXPROM13:%.*]] = sext i32 [[TMP9]] to i64
+; CHECK-NEXT:    [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM13]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4
+; CHECK-NEXT:    [[CALL15:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP10]])
+; CHECK-NEXT:    br label [[FOR_INC16:%.*]]
+; CHECK:       for.inc16:
+; CHECK-NEXT:    [[TMP11:%.*]] = load i32, ptr [[I]], align 4
+; CHECK-NEXT:    [[ADD17:%.*]] = add nsw i32 [[TMP11]], 2
+; CHECK-NEXT:    store i32 [[ADD17]], ptr [[I]], align 4
+; CHECK-NEXT:    br label [[FOR_COND10]]
+; CHECK:       for.end18:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %array = alloca ptr, align 8
+  %i = alloca i32, align 4
+  %call = call noalias ptr @malloc(i64 noundef 40000) #3
+  store ptr %call, ptr %array, align 8
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 10000
+  br i1 %cmp, label %for.body, label %for.end
+
+for.body:
+  %1 = load i32, ptr %i, align 4
+  %2 = load ptr, ptr %array, align 8
+  %3 = load i32, ptr %i, align 4
+  %idxprom = sext i32 %3 to i64
+  %arrayidx = getelementptr inbounds i32, ptr %2, i64 %idxprom
+  store i32 %1, ptr %arrayidx, align 4
+  br label %for.inc
+
+for.inc:
+  %4 = load i32, ptr %i, align 4
+  %add = add nsw i32 %4, 2
+  store i32 %add, ptr %i, align 4
+  br label %for.cond
+
+for.end:
+  store i32 0, ptr %i, align 4
+  br label %for.cond1
+
+for.cond1:
+  %5 = load i32, ptr %i, align 4
+  %cmp2 = icmp slt i32 %5, 10000
+  br i1 %cmp2, label %for.body3, label %for.end9
+
+for.body3:
+  %6 = load ptr, ptr %array, align 8
+  %7 = load i32, ptr %i, align 4
+  %idxprom4 = sext i32 %7 to i64
+  %arrayidx5 = getelementptr inbounds i32, ptr %6, i64 %idxprom4
+  %8 = load i32, ptr %arrayidx5, align 4
+  %add6 = add nsw i32 %8, 1
+  store i32 %add6, ptr %arrayidx5, align 4
+  br label %for.inc7
+
+for.inc7:
+  %9 = load i32, ptr %i, align 4
+  %add8 = add nsw i32 %9, 2
+  store i32 %add8, ptr %i, align 4
+  br label %for.cond1
+
+for.end9:
+  store i32 0, ptr %i, align 4
+  br label %for.cond10
+
+for.cond10:
+  %10 = load i32, ptr %i, align 4
+  %cmp11 = icmp slt i32 %10, 10000
+  br i1 %cmp11, label %for.body12, label %for.end18
+
+for.body12:
+  %11 = load ptr, ptr %array, align 8
+  %12 = load i32, ptr %i, align 4
+  %idxprom13 = sext i32 %12 to i64
+  %arrayidx14 = getelementptr inbounds i32, ptr %11, i64 %idxprom13
+  %13 = load i32, ptr %arrayidx14, align 4
+  %call15 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %13)
+  br label %for.inc16
+
+for.inc16:
+  %14 = load i32, ptr %i, align 4
+  %add17 = add nsw i32 %14, 2
+  store i32 %add17, ptr %i, align 4
+  br label %for.cond10
+
+for.end18:
+  ret void
+}
+
+declare i32 @printf(ptr noundef, ...) #1
+
+; Function Attrs: nounwind allocsize(0)
+declare noalias ptr @malloc(i64 noundef) #1
+;.
+; CHECK: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) }
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CGSCC: {{.*}}
+; TUNIT: {{.*}}
diff --git a/llvm/test/Transforms/Attributor/depgraph.ll b/llvm/test/Transforms/Attributor/depgraph.ll
index 22186edefaf27d3..8cc3d10063c1a26 100644
--- a/llvm/test/Transforms/Attributor/depgraph.ll
+++ b/llvm/test/Transforms/Attributor/depgraph.ll
@@ -1,8 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals
 ; RUN: opt -passes=attributor-cgscc -S < %s 2>&1 | FileCheck %s --check-prefixes=CHECK
-; RUN: opt -passes=attributor-cgscc -disable-output -attributor-print-dep < %s 2>&1 | FileCheck %s --check-prefixes=GRAPH
-; RUN: opt -passes=attributor-cgscc -disable-output -attributor-dump-dep-graph -attributor-depgraph-dot-filename-prefix=%t < %s 2>/dev/null
-; RUN: FileCheck %s -input-file=%t_0.dot --check-prefix=DOT
 
 ; Test 0
 ;
@@ -46,298 +43,7 @@ define ptr @checkAndAdvance(ptr align 16 %0) {
   %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]
   ret ptr %.0
 }
-
-;
-; Check for graph
-;
-
-; GRAPH:      [AAIsDead] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state Live[#BB 4/4][#TBEP 0][#KDE 1]
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %3 = icmp eq i32 %2, 0' at position {flt: [@-1]} with state set-state(< {  %3 = icmp eq i32 %2, 0[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %2 = load i32, ptr %0, align 4' at position {flt: [@-1]} with state set-state(< {  %2 = load i32, ptr %0, align 4[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAUnderlyingObjects] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state UnderlyingObjects <invalid>
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI <<null inst>> at position {flt: [@-1]} with state set-state(< {i32 0[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoReturn] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state may-return
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoReturn] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state may-return
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  ret ptr %.0' at position {flt: [@-1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAUndefinedBehavior] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state undefined-behavior
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state set-state(< {ptr %0[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {flt: [@-1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoUnwind] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nounwind
-; GRAPH-NEXT:   updates [AAIsDead] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {flt: [@-1]} with state assumed-live
-; GRAPH-NEXT:   updates [AANoUnwind] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state nounwind
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoUnwind] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state nounwind
-; GRAPH-NEXT:   updates [AANoUnwind] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nounwind
-; GRAPH-NEXT:   updates [AANoCapture] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAMemoryBehavior] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state readonly
-; GRAPH-NEXT:   updates [AAIsDead] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {flt: [@-1]} with state assumed-live
-; GRAPH-NEXT:   updates [AAMemoryBehavior] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state readonly
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAMemoryBehavior] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state readonly
-; GRAPH-NEXT:   updates [AAMemoryBehavior] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state readonly
-; GRAPH-NEXT:   updates [AANoCapture] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned
-; GRAPH-NEXT:   updates [AAMemoryBehavior] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state readonly
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  %2 = load i32, ptr %0, align 4' at position {flt: [@-1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  %3 = icmp eq i32 %2, 0' at position {flt: [@-1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  br i1 %3, label %4, label %7' at position {flt: [@-1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_ret: [@-1]} with state set-state(< {  %5 = getelementptr inbounds i32, ptr %0, i64 4[3],   %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >)
-; GRAPH-NEXT:   updates [AAPotentialValues] for CtxI '  %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]' at position {flt:.0 [.0 at -1]} with state set-state(< {ptr %0[3],   %5 = getelementptr inbounds i32, ptr %0, i64 4[3],   %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state set-state(< {  %.0 = phi ptr [ %6, %4 ], [ %0, %7 ][3], } >)
-; GRAPH-NEXT:   updates [AAPotentialValues] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_ret: [@-1]} with state set-state(< {  %5 = getelementptr inbounds i32, ptr %0, i64 4[3],   %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >)
-; GRAPH-NEXT:   updates [AANoUndef] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state may-undef-or-poison
-; GRAPH-NEXT:   updates [AANoCapture] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned
-; GRAPH-NEXT:   updates [AAAlign] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state align<1-16>
-; GRAPH-NEXT:   updates [AANonNull] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state nonnull
-; GRAPH-NEXT:   updates [AADereferenceable] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state unknown-dereferenceable
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]' at position {flt:.0 [.0 at -1]} with state set-state(< {ptr %0[3],   %5 = getelementptr inbounds i32, ptr %0, i64 4[3],   %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >)
-; GRAPH-NEXT:   updates [AAPotentialValues] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state set-state(< {  %.0 = phi ptr [ %6, %4 ], [ %0, %7 ][3], } >)
-; GRAPH-NEXT:   updates [AAPotentialValues] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_ret: [@-1]} with state set-state(< {  %5 = getelementptr inbounds i32, ptr %0, i64 4[3],   %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >)
-; GRAPH-NEXT:   updates [AANoCapture] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned
-; GRAPH-NEXT:   updates [AAAlign] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state align<1-16>
-; GRAPH-NEXT:   updates [AANonNull] for CtxI ' %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]' at position {flt:.0 [.0 at -1]} with state nonnull
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state set-state(< {  %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI <<null inst>> at position {flt: [@-1]} with state set-state(< {i64 4[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %2 = load i32, ptr %0, align 4' at position {flt:checkAndAdvance [checkAndAdvance at -1]} with state set-state(< {@checkAndAdvance[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPotentialValues] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state set-state(< {  %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAInstanceInfo] for CtxI '  %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state <unique [fAa]>
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoRecurse] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state may-recurse
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAInterFnReachability] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state #queries(1)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIntraFnReachability] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state #queries(1)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AACallEdges] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state CallEdges[0,1]
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  br label %8' at position {flt: [@-1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoUndef] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state may-undef-or-poison
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoUndef] for CtxI '  %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state may-undef-or-poison
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoUndef] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state may-undef-or-poison
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAHeapToStack] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state [H2S] Mallocs Good/Bad: 0/0
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAMustProgress] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state may-not-progress
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAWillReturn] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state may-noreturn
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAWillReturn] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state may-noreturn
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoRecurse] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state may-recurse
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoFree] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state nofree
-; GRAPH-NEXT:   updates [AANoFree] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nofree
-; GRAPH-NEXT:   updates [AANoFree] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state nofree
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoFree] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nofree
-; GRAPH-NEXT:   updates [AANoFree] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state nofree
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoSync] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state nosync
-; GRAPH-NEXT:   updates [AANoSync] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nosync
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoSync] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nosync
-; GRAPH-NEXT:   updates [AANoSync] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state nosync
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAMemoryLocation] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state memory:argument
-; GRAPH-NEXT:   updates [AAMemoryLocation] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state memory:argument
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAMemoryLocation] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state memory:argument
-; GRAPH-NEXT:   updates [AAMemoryLocation] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state memory:argument
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAMemoryBehavior] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state readonly
-; GRAPH-NEXT:   updates [AAMemoryBehavior] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state readonly
-; GRAPH-NEXT:   updates [AAMemoryLocation] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state memory:argument
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAMemoryBehavior] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state readonly
-; GRAPH-NEXT:   updates [AAMemoryBehavior] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state readonly
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoCapture] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned
-; GRAPH-NEXT:   updates [AANoCapture] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state assumed not-captured-maybe-returned
-; GRAPH-NEXT:   updates [AAMemoryBehavior] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state readonly
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoCapture] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state assumed not-captured-maybe-returned
-; GRAPH-NEXT:   updates [AANoCapture] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAIsDead] for CtxI '  br label %8' at position {flt: [@-1]} with state assumed-live
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAUnderlyingObjects] for CtxI '  %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state UnderlyingObjects inter #1 objs, intra #1 objs
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAAssumptionInfo] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance at -1]} with state Known [], Assumed []
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAAlign] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state align<1-16>
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAAlign] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state align<16-16>
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAAlign] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state align<16-16>
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAAlign] for CtxI '  %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state align<16-16>
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANonNull] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state nonnull
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANonNull] for CtxI ' %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]' at position {flt:.0 [.0 at -1]} with state nonnull
-; GRAPH-NEXT:   updates [AANonNull] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state nonnull
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANonNull] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state nonnull
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoAlias] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state may-alias
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AADereferenceable] for CtxI '  %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance at -1]} with state unknown-dereferenceable
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AADereferenceable] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state dereferenceable_or_null<4-4> [non-null is unknown]
-; GRAPH-NEXT:   updates [AADereferenceable] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state unknown-dereferenceable
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AADereferenceable] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state unknown-dereferenceable
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoFree] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state nofree
-; GRAPH-NEXT:   updates [AANoFree] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state nofree
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAPrivatizablePtr] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state [no-priv]
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAAssumptionInfo] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state Known [], Assumed []
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoAlias] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state may-alias
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoAlias] for CtxI '  %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state may-alias
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AANoFree] for CtxI '  %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state nofree
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AAAddressSpace] for CtxI '  %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state addrspace(<invalid>)
-; GRAPH-EMPTY:
-; GRAPH-NEXT: [AADereferenceable] for CtxI '  %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state unknown-dereferenceable
-
-; GRAPH-NOT: update
-
-;
-; Check for .dot file
-;
-; DOT-DAG: Node[[Node0:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node1:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node2:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node3:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node4:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node5:0x[a-z0-9]+]] [shape=record,label="{[AANoReturn]
-; DOT-DAG: Node[[Node6:0x[a-z0-9]+]] [shape=record,label="{[AANoReturn]
-; DOT-DAG: Node[[Node7:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node8:0x[a-z0-9]+]] [shape=record,label="{[AAWillReturn]
-; DOT-DAG: Node[[Node9:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node10:0x[a-z0-9]+]] [shape=record,label="{[AANoUnwind]
-; DOT-DAG: Node[[Node11:0x[a-z0-9]+]] [shape=record,label="{[AANoUnwind]
-; DOT-DAG: Node[[Node12:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryLocation]
-; DOT-DAG: Node[[Node13:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryLocation]
-; DOT-DAG: Node[[Node14:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryBehavior]
-; DOT-DAG: Node[[Node15:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node16:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node17:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node18:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryBehavior]
-; DOT-DAG: Node[[Node19:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node20:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node22:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node23:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node24:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node25:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node26:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues]
-; DOT-DAG: Node[[Node27:0x[a-z0-9]+]] [shape=record,label="{[AAInstanceInfo]
-; DOT-DAG: Node[[Node28:0x[a-z0-9]+]] [shape=record,label="{[AANoRecurse]
-; DOT-DAG: Node[[Node29:0x[a-z0-9]+]] [shape=record,label="{[AAInterFnReachability]
-; DOT-DAG: Node[[Node30:0x[a-z0-9]+]] [shape=record,label="{[AAIntraFnReachability]
-; DOT-DAG: Node[[Node31:0x[a-z0-9]+]] [shape=record,label="{[AACallEdges]
-; DOT-DAG: Node[[Node32:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node33:0x[a-z0-9]+]] [shape=record,label="{[AAWillReturn]
-; DOT-DAG: Node[[Node34:0x[a-z0-9]+]] [shape=record,label="{[AANoRecurse]
-; DOT-DAG: Node[[Node35:0x[a-z0-9]+]] [shape=record,label="{[AAUndefinedBehavior]
-; DOT-DAG: Node[[Node36:0x[a-z0-9]+]] [shape=record,label="{[AANoUndef]
-; DOT-DAG: Node[[Node37:0x[a-z0-9]+]] [shape=record,label="{[AANoUndef]
-; DOT-DAG: Node[[Node38:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node39:0x[a-z0-9]+]] [shape=record,label="{[AANoUndef]
-; DOT-DAG: Node[[Node41:0x[a-z0-9]+]] [shape=record,label="{[AANoSync]
-; DOT-DAG: Node[[Node42:0x[a-z0-9]+]] [shape=record,label="{[AANoSync]
-; DOT-DAG: Node[[Node43:0x[a-z0-9]+]] [shape=record,label="{[AANoFree]
-; DOT-DAG: Node[[Node44:0x[a-z0-9]+]] [shape=record,label="{[AANoFree]
-; DOT-DAG: Node[[Node45:0x[a-z0-9]+]] [shape=record,label="{[AAAssumptionInfo]
-; DOT-DAG: Node[[Node46:0x[a-z0-9]+]] [shape=record,label="{[AAHeapToStack]
-; DOT-DAG: Node[[Node47:0x[a-z0-9]+]] [shape=record,label="{[AAAlign]
-; DOT-DAG: Node[[Node48:0x[a-z0-9]+]] [shape=record,label="{[AAAlign]
-; DOT-DAG: Node[[Node49:0x[a-z0-9]+]] [shape=record,label="{[AAAlign]
-; DOT-DAG: Node[[Node50:0x[a-z0-9]+]] [shape=record,label="{[AAAlign]
-; DOT-DAG: Node[[Node51:0x[a-z0-9]+]] [shape=record,label="{[AANonNull]
-; DOT-DAG: Node[[Node52:0x[a-z0-9]+]] [shape=record,label="{[AANonNull]
-; DOT-DAG: Node[[Node53:0x[a-z0-9]+]] [shape=record,label="{[AANoAlias]
-; DOT-DAG: Node[[Node54:0x[a-z0-9]+]] [shape=record,label="{[AADereferenceable]
-; DOT-DAG: Node[[Node55:0x[a-z0-9]+]] [shape=record,label="{[AADereferenceable]
-; DOT-DAG: Node[[Node56:0x[a-z0-9]+]] [shape=record,label="{[AADereferenceable]
-; DOT-DAG: Node[[Node59:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node60:0x[a-z0-9]+]] [shape=record,label="{[AANoAlias]
-; DOT-DAG: Node[[Node61:0x[a-z0-9]+]] [shape=record,label="{[AANoCapture]
-; DOT-DAG: Node[[Node62:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node63:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node64:0x[a-z0-9]+]] [shape=record,label="{[AANoCapture]
-; DOT-DAG: Node[[Node65:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead]
-; DOT-DAG: Node[[Node66:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryBehavior]
-; DOT-DAG: Node[[Node67:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryBehavior]
-; DOT-DAG: Node[[Node68:0x[a-z0-9]+]] [shape=record,label="{[AANoFree]
-; DOT-DAG: Node[[Node69:0x[a-z0-9]+]] [shape=record,label="{[AAPrivatizablePtr]
-; DOT-DAG: Node[[Node70:0x[a-z0-9]+]] [shape=record,label="{[AAAssumptionInfo]
-; DOT-DAG: Node[[Node71:0x[a-z0-9]+]] [shape=record,label="{[AANoAlias]
-; DOT-DAG: Node[[Node73:0x[a-z0-9]+]] [shape=record,label="{[AANoFree]
-; DOT-DAG: Node[[Node75:0x[a-z0-9]+]] [shape=record,label="{[AAAddressSpace]
-; DOT-DAG: Node[[Node74:0x[a-z0-9]+]] [shape=record,label="{[AADereferenceable]
-
-; DOT-DAG: Node[[Node20]] -> Node[[Node19]];
-; DOT-DAG: Node[[Node13]] -> Node[[Node12]];
-; DOT-DAG: Node[[Node55]] -> Node[[Node56]];
-; DOT-DAG: Node[[Node68]] -> Node[[Node73]];
-; DOT-DAG: Node[[Node64]] -> Node[[Node61]];
-; DOT-DAG: Node[[Node61]] -> Node[[Node64]];
-; DOT-DAG: Node[[Node12]] -> Node[[Node13]];
-; DOT-DAG: Node[[Node11]] -> Node[[Node61]];
-; DOT-DAG: Node[[Node14]] -> Node[[Node18]];
-; DOT-DAG: Node[[Node43]] -> Node[[Node68]];
-; DOT-DAG: Node[[Node19]] -> Node[[Node22]];
-; DOT-DAG: Node[[Node10]] -> Node[[Node11]];
-; DOT-DAG: Node[[Node41]] -> Node[[Node42]];
-; DOT-DAG: Node[[Node42]] -> Node[[Node41]];
-; DOT-DAG: Node[[Node11]] -> Node[[Node10]];
-; DOT-DAG: Node[[Node67]] -> Node[[Node66]];
-; DOT-DAG: Node[[Node18]] -> Node[[Node14]];
-; DOT-DAG: Node[[Node66]] -> Node[[Node67]];
-; DOT-DAG: Node[[Node44]] -> Node[[Node43]];
-; DOT-DAG: Node[[Node43]] -> Node[[Node44]];
 ;.
 ; CHECK: attributes #[[ATTR0]] = { nofree nosync nounwind memory(argmem: read) }
 ; CHECK: attributes #[[ATTR1]] = { nofree nosync nounwind memory(read) }
 ;.
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GRAPH: {{.*}}
diff --git a/llvm/test/Transforms/Attributor/nodelete.ll b/llvm/test/Transforms/Attributor/nodelete.ll
index 9d754506c5c9d7c..03477e6589a74ca 100644
--- a/llvm/test/Transforms/Attributor/nodelete.ll
+++ b/llvm/test/Transforms/Attributor/nodelete.ll
@@ -10,6 +10,7 @@ define hidden i64 @f1() align 2 {
 ; TUNIT-LABEL: define {{[^@]+}}@f1
 ; TUNIT-SAME: () #[[ATTR0:[0-9]+]] align 2 {
 ; TUNIT-NEXT:  entry:
+; TUNIT-NEXT:    [[REF_TMP1:%.*]] = alloca i8, i32 0, align 8
 ; TUNIT-NEXT:    ret i64 undef
 ;
 ; CGSCC: Function Attrs: mustprogress nofree nosync nounwind willreturn memory(none)


