[llvm] [Attributor]: AAPointerInfo - store the full chain of instructions that make up the access (PR #96526)
Vidush Singhal via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 8 13:24:08 PDT 2024
vidsinghal updated https://github.com/llvm/llvm-project/pull/96526
From 0e2b87e49b79a492da03b7e4a24be828d49a64ae Mon Sep 17 00:00:00 2001
From: vidsinghal <vidush.sl at gmail.com>
Date: Mon, 24 Jun 2024 11:00:52 -0400
Subject: [PATCH] Store the full chain of instructions that make up the access.
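
Each value in the OffsetInfoMap now records its depth-1 predecessor values
("origins"), and access paths are enumerated by walking those edges backwards
from the access instruction, forking once per predecessor. A minimal
standalone sketch of that backtracking idea (simplified stand-in types and
hypothetical names, not the actual API of this patch):

  #include <map>
  #include <set>
  #include <string>
  #include <vector>

  // Each value maps to its depth-1 predecessors, mirroring the role of
  // OffsetInfo::Origins in this patch.
  using Origins = std::map<std::string, std::set<std::string>>;

  // Walk backwards from the access, forking one path per predecessor,
  // analogous to findAllAccessPaths below.
  void collectPaths(const Origins &O, const std::string &V,
                    std::vector<std::string> Path,
                    std::vector<std::vector<std::string>> &Out) {
    Path.push_back(V);
    auto It = O.find(V);
    if (It == O.end() || It->second.empty()) {
      Out.push_back(Path); // Reached the underlying object: one full path.
      return;
    }
    for (const std::string &Pred : It->second)
      collectPaths(O, Pred, Path, Out); // Fork one path copy per origin.
  }

  int main() {
    // A load through a select over two GEPs yields two access paths.
    Origins O = {{"%load", {"%sel"}},
                 {"%sel", {"%gep1", "%gep2"}},
                 {"%gep1", {"%alloca"}},
                 {"%gep2", {"%alloca"}}};
    std::vector<std::vector<std::string>> Paths;
    collectPaths(O, "%load", {}, Paths);
    // Paths: {%load, %sel, %gep1, %alloca}, {%load, %sel, %gep2, %alloca}.
  }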
---
llvm/include/llvm/Transforms/IPO/Attributor.h | 200 ++++++++-
.../Transforms/IPO/AttributorAttributes.cpp | 313 +++++++++-----
.../pointer-info-track-access-chain.ll | 387 ++++++++++++++++++
3 files changed, 790 insertions(+), 110 deletions(-)
create mode 100644 llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll
diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index f70fd02ca573cc..6f989e3317fff8 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -103,7 +103,10 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/CFG.h"
@@ -141,6 +144,7 @@
#include <limits>
#include <map>
#include <optional>
+#include <tuple>
namespace llvm {
@@ -320,6 +324,10 @@ inline bool operator==(const RangeTy &A, const RangeTy &B) {
return A.Offset == B.Offset && A.Size == B.Size;
}
+inline bool operator<(const RangeTy &A, const RangeTy &B) {
+ return A.Offset < B.Offset;
+}
+
inline bool operator!=(const RangeTy &A, const RangeTy &B) { return !(A == B); }
/// Return the initial value of \p Obj with type \p Ty if that is a constant.
@@ -5785,6 +5793,135 @@ struct AAPointerInfo : public AbstractAttribute {
AK_MUST_READ_WRITE = AK_MUST | AK_R | AK_W,
};
+ /// A helper containing a list of ranges computed for a Use, together with
+ /// the origin values that produced each range. Ideally the range list
+ /// should be strictly ascending, but we ensure that only when we actually
+ /// translate it to a RangeList.
+ struct OffsetInfo {
+ using VecTy = SmallVector<AA::RangeTy>;
+ // Parallel to Ranges: the depth-1 predecessor values for each range.
+ using OriginsTy = SmallVector<SmallPtrSet<Value *, 4>>;
+ using const_iterator = VecTy::const_iterator;
+ OriginsTy Origins;
+ VecTy Ranges;
+
+ const_iterator begin() const { return Ranges.begin(); }
+ const_iterator end() const { return Ranges.end(); }
+
+ bool operator==(const OffsetInfo &RHS) const {
+ return Ranges == RHS.Ranges && Origins == RHS.Origins;
+ }
+
+ bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
+
+ // Insert a new Range with its Origin; if the Range already exists, just
+ // record the additional Origin.
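+ // For example, inserting the same {Offset, Size} range twice with two
+ // different origin values keeps one range whose origin set has both.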
+ void insert(AA::RangeTy Range, Value &V) {
+ auto *It = std::find(Ranges.begin(), Ranges.end(), Range);
+ // The range already exists; just add the new origin.
+ if (It != Ranges.end()) {
+ size_t Index = It - Ranges.begin();
+ if (Index < Origins.size())
+ Origins[Index].insert(&V);
+ } else {
+ Ranges.push_back(Range);
+ Origins.emplace_back();
+ Origins.back().insert(&V);
+ }
+ }
+
+ void setSizeAll(uint64_t Size) {
+ for (auto &Range : Ranges)
+ Range.Size = Size;
+ }
+
+ // Collect the sorted, deduplicated offsets from Ranges.
+ void getOnlyOffsets(SmallVector<int64_t> &Offsets) {
+ for (auto &Range : Ranges)
+ Offsets.push_back(Range.Offset);
+ // Sort and deduplicate.
+ sort(Offsets.begin(), Offsets.end());
+ Offsets.erase(std::unique(Offsets.begin(), Offsets.end()), Offsets.end());
+ }
+
+ bool isUnassigned() const { return Ranges.empty(); }
+
+ bool isUnknown() const {
+ if (isUnassigned())
+ return false;
+ if (Ranges.size() == 1)
+ return Ranges.front().Offset == AA::RangeTy::Unknown;
+ return false;
+ }
+
+ void setUnknown(Value &V) {
+ Ranges.clear();
+ Origins.clear();
+ insert(AA::RangeTy{AA::RangeTy::Unknown, AA::RangeTy::Unknown}, V);
+ }
+
+ void addToAll(int64_t Inc, Value &V) {
+ for (auto &Range : Ranges)
+ Range.Offset += Inc;
+
+ if (!Origins.empty()) {
+ for (auto &Origin : Origins)
+ Origin.insert(&V);
+ } else {
+ for (size_t Index = 0; Index < Ranges.size(); Index++) {
+ Origins.emplace_back();
+ Origins[Index].insert(&V);
+ }
+ }
+ }
+
+ void addToAll(int64_t Inc) {
+ for (auto &Range : Ranges)
+ Range.Offset += Inc;
+ }
+
+ /// Copy ranges from \p R into the current list.
+ ///
+ /// The merged ranges are sorted and deduplicated, and \p R's origin sets
+ /// are appended afterwards.
+ void merge(const OffsetInfo &R) {
+ Ranges.append(R.Ranges);
+ // ensure elements are unique.
+ sort(Ranges.begin(), Ranges.end());
+ Ranges.erase(std::unique(Ranges.begin(), Ranges.end()), Ranges.end());
+
+ Origins.append(R.Origins.begin(), R.Origins.end());
+ }
+
+ // Merge the ranges of \p R into this OffsetInfo and record \p CurPtr as
+ // an additional origin for every range taken from \p R.
+ void mergeWithOffset(const OffsetInfo &R, Value &CurPtr) {
+ Ranges.append(R.Ranges);
+ // ensure elements are unique.
+ sort(Ranges.begin(), Ranges.end());
+ Ranges.erase(std::unique(Ranges.begin(), Ranges.end()), Ranges.end());
+ for (auto Range : R.Ranges) {
+ auto *It = std::find(Ranges.begin(), Ranges.end(), Range);
+ if (It == Ranges.end())
+ continue;
+ size_t Index = It - Ranges.begin();
+ if (Index >= Origins.size()) {
+ Origins.emplace_back();
+ Origins.back().insert(&CurPtr);
+ } else {
+ Origins[Index].insert(&CurPtr);
+ }
+ }
+ }
+ };
+
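+ // An access path is the def-use chain from the access instruction back to
+ // the underlying object; a single access may be reachable along several
+ // such paths.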
+ using OffsetInfoMapTy = DenseMap<Value *, OffsetInfo>;
+ using AccessPathTy = SmallVector<Value *, 4>;
+ using AccessPathSetTy = SmallPtrSet<AccessPathTy *, 4>;
+
/// A container for a list of ranges.
struct RangeList {
// The set of ranges rarely contains more than one element, and is unlikely
@@ -5939,15 +6076,17 @@ struct AAPointerInfo : public AbstractAttribute {
/// An access description.
struct Access {
Access(Instruction *I, int64_t Offset, int64_t Size,
- std::optional<Value *> Content, AccessKind Kind, Type *Ty)
+ std::optional<Value *> Content, AccessKind Kind, Type *Ty,
+ AccessPathSetTy *AccessPaths)
: LocalI(I), RemoteI(I), Content(Content), Ranges(Offset, Size),
- Kind(Kind), Ty(Ty) {
+ Kind(Kind), Ty(Ty), AccessPaths(AccessPaths) {
verify();
}
Access(Instruction *LocalI, Instruction *RemoteI, const RangeList &Ranges,
- std::optional<Value *> Content, AccessKind K, Type *Ty)
+ std::optional<Value *> Content, AccessKind K, Type *Ty,
+ AccessPathSetTy *AccessPaths)
: LocalI(LocalI), RemoteI(RemoteI), Content(Content), Ranges(Ranges),
- Kind(K), Ty(Ty) {
+ Kind(K), Ty(Ty), AccessPaths(AccessPaths) {
if (Ranges.size() > 1) {
Kind = AccessKind(Kind | AK_MAY);
Kind = AccessKind(Kind & ~AK_MUST);
@@ -5956,9 +6095,9 @@ struct AAPointerInfo : public AbstractAttribute {
}
Access(Instruction *LocalI, Instruction *RemoteI, int64_t Offset,
int64_t Size, std::optional<Value *> Content, AccessKind Kind,
- Type *Ty)
+ Type *Ty, AccessPathSetTy *AccessPaths)
: LocalI(LocalI), RemoteI(RemoteI), Content(Content),
- Ranges(Offset, Size), Kind(Kind), Ty(Ty) {
+ Ranges(Offset, Size), Kind(Kind), Ty(Ty), AccessPaths(AccessPaths) {
verify();
}
Access(const Access &Other) = default;
@@ -5966,7 +6105,8 @@ struct AAPointerInfo : public AbstractAttribute {
Access &operator=(const Access &Other) = default;
bool operator==(const Access &R) const {
return LocalI == R.LocalI && RemoteI == R.RemoteI && Ranges == R.Ranges &&
- Content == R.Content && Kind == R.Kind;
+ Content == R.Content && Kind == R.Kind &&
+ checkAccessPathsAreSame(R.AccessPaths);
}
bool operator!=(const Access &R) const { return !(*this == R); }
@@ -6078,11 +6218,53 @@ struct AAPointerInfo : public AbstractAttribute {
}
}
+ // Merge another access path set into this one, skipping duplicate paths.
+ void mergeAccessPaths(const AccessPathSetTy *AccessPathsNew) const {
+ for (auto *Path : *AccessPathsNew)
+ if (!existsChain(Path))
+ AccessPaths->insert(Path);
+ }
+
+ // Check whether the given access path set matches this one.
+ bool checkAccessPathsAreSame(const AccessPathSetTy *AccessPathsR) const {
+ if (AccessPaths->size() != AccessPathsR->size())
+ return false;
+
+ for (auto *Path : *AccessPathsR)
+ if (!existsChain(Path))
+ return false;
+ return true;
+ }
+
+ // Check whether the given path already exists in the access path set.
+ bool existsChain(const AccessPathTy *NewPath) const {
+ for (auto *OldPath : *AccessPaths)
+ if (*OldPath == *NewPath)
+ return true;
+
+ return false;
+ }
+
+ void dumpAccessPaths(raw_ostream &O) const {
+ O << "Print all access paths found:"
+ << "\n";
+ for (auto *It : *AccessPaths) {
+ O << "Backtrack a unique access path:\n";
+ for (Value *Ins : *It) {
+ O << *Ins << "\n";
+ }
+ }
+ }
+
+ const AccessPathSetTy *getAccessChain() const { return AccessPaths; }
const RangeList &getRanges() const { return Ranges; }
using const_iterator = RangeList::const_iterator;
const_iterator begin() const { return Ranges.begin(); }
const_iterator end() const { return Ranges.end(); }
+ size_t size() const { return Ranges.size(); }
private:
/// The instruction responsible for the access with respect to the local
@@ -6105,6 +6287,10 @@ struct AAPointerInfo : public AbstractAttribute {
/// The type of the content, thus the type read/written, can be null if not
/// available.
Type *Ty;
+
+ /// The full chain of instructions that participate in the Access.
+ /// There may be more than one access chain.
+ AccessPathSetTy *AccessPaths;
};
/// Create an abstract attribute view for the position \p IRP.
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index 2816a85743faad..941bffeb2ee36f 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -11,6 +11,8 @@
//
//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/ADT/APInt.h"
@@ -850,8 +852,13 @@ struct AA::PointerInfo::State : public AbstractState {
ChangeStatus addAccess(Attributor &A, const AAPointerInfo::RangeList &Ranges,
Instruction &I, std::optional<Value *> Content,
AAPointerInfo::AccessKind Kind, Type *Ty,
+ AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap,
Instruction *RemoteI = nullptr);
+ AAPointerInfo::AccessPathSetTy *
+ findAllAccessPaths(AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap,
+ Instruction *LocalI);
+
AAPointerInfo::const_bin_iterator begin() const { return OffsetBins.begin(); }
AAPointerInfo::const_bin_iterator end() const { return OffsetBins.end(); }
int64_t numOffsetBins() const { return OffsetBins.size(); }
@@ -926,10 +933,95 @@ struct AA::PointerInfo::State : public AbstractState {
BooleanState BS;
};
+AAPointerInfo::AccessPathSetTy *AA::PointerInfo::State::findAllAccessPaths(
+ AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap, Instruction *LocalI) {
+ AAPointerInfo::AccessPathSetTy *AccessPathsSet =
+ new AAPointerInfo::AccessPathSetTy();
+
+ // Each stack element holds a value, the path (storage) it belongs to, and
+ // a per-path visited set: an instruction must not be visited twice on the
+ // same path, but it may be visited more than once if it occurs on
+ // different paths.
+ using VisitedTy = SmallPtrSet<Value *, 4>;
+ using StackElementTy =
+ std::tuple<Value *, AAPointerInfo::AccessPathTy *, VisitedTy>;
+
+ SmallVector<StackElementTy, 16> Stack;
+
+ // Seed the stack with the operands of the access instruction.
+ for (Use &U : LocalI->operands()) {
+ Value *V = U.get();
+ if (!OffsetInfoMap.contains(V))
+ continue;
+
+ SmallPtrSet<Value *, 4> LocalVisitedMap;
+ AAPointerInfo::AccessPathTy *NewPath = new AAPointerInfo::AccessPathTy();
+ AccessPathsSet->insert(NewPath);
+ NewPath->push_back(LocalI);
+ Stack.push_back(std::make_tuple(V, NewPath, LocalVisitedMap));
+ }
+
+ while (!Stack.empty()) {
+ auto Entry = Stack.pop_back_val();
+ Value *Top = std::get<0>(Entry);
+ AAPointerInfo::AccessPathTy *CurrentChain = std::get<1>(Entry);
+ auto &Visited = std::get<2>(Entry);
+
+ if (!OffsetInfoMap.contains(Top))
+ continue;
+
+ if (!Visited.insert(Top).second)
+ continue;
+
+ CurrentChain->push_back(Top);
+ auto OI = OffsetInfoMap.lookup(Top);
+ auto &Origins = OI.Origins;
+
+ SmallPtrSet<Value *, 16> Successors;
+ for (auto &Origin : Origins) {
+ for (auto *Val : Origin) {
+ // Origins stores depth-1 predecessors, and the underlying object is
+ // recorded as its own origin, so seeing the current value again means
+ // this path has reached its end; do not add it as a successor.
+ if (Val != Top)
+ Successors.insert(Val);
+ }
+ }
+
+ if (Successors.empty())
+ continue;
+
+ // Fork the current path: one copy per additional successor.
+ SmallVector<AAPointerInfo::AccessPathTy *> NewPaths;
+ NewPaths.push_back(CurrentChain);
+ for (size_t Index = 1; Index < Successors.size(); Index++) {
+ AAPointerInfo::AccessPathTy *NewPath = new AAPointerInfo::AccessPathTy(
+ CurrentChain->begin(), CurrentChain->end());
+ NewPaths.push_back(NewPath);
+ }
+
+ int Index = 0;
+ // Traverse the successors
+ for (auto *Successor : Successors) {
+ AAPointerInfo::AccessPathTy *NextChain = NewPaths[Index];
+ AccessPathsSet->insert(NextChain);
+ // Push each successor and its corresponding path storage onto the
+ // stack.
+ VisitedTy NewVisitedSet(Visited.begin(), Visited.end());
+ Stack.push_back(std::make_tuple(Successor, NextChain, NewVisitedSet));
+ Index++;
+ }
+ }
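+
+ // For example, a load through "select i1 %c, ptr %a, ptr %b" forks into
+ // two access paths, one continuing through %a and one through %b.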
+
+ return AccessPathsSet;
+}
+
ChangeStatus AA::PointerInfo::State::addAccess(
Attributor &A, const AAPointerInfo::RangeList &Ranges, Instruction &I,
std::optional<Value *> Content, AAPointerInfo::AccessKind Kind, Type *Ty,
- Instruction *RemoteI) {
+ AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap, Instruction *RemoteI) {
RemoteI = RemoteI ? RemoteI : &I;
// Check if we have an access for this instruction, if not, simply add it.
@@ -956,7 +1048,11 @@ ChangeStatus AA::PointerInfo::State::addAccess(
};
if (!AccExists) {
- AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty);
+ AAPointerInfo::AccessPathSetTy *AccessPaths =
+ AA::PointerInfo::State::findAllAccessPaths(OffsetInfoMap, &I);
+ AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty,
+ AccessPaths);
+
assert((AccessList.size() == AccIndex + 1) &&
"New Access should have been at AccIndex");
LocalList.push_back(AccIndex);
@@ -966,13 +1062,20 @@ ChangeStatus AA::PointerInfo::State::addAccess(
// Combine the new Access with the existing Access, and then update the
// mapping in the offset bins.
- AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty);
+ AAPointerInfo::AccessPathSetTy *AccessPaths =
+ AA::PointerInfo::State::findAllAccessPaths(OffsetInfoMap, &I);
+ AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty,
+ AccessPaths);
auto &Current = AccessList[AccIndex];
auto Before = Current;
+
Current &= Acc;
if (Current == Before)
return ChangeStatus::UNCHANGED;
+ // Merge the newly generated access paths with the old access paths.
+ Before.mergeAccessPaths(Acc.getAccessChain());
+
auto &ExistingRanges = Before.getRanges();
auto &NewRanges = Current.getRanges();
@@ -1002,60 +1105,19 @@ ChangeStatus AA::PointerInfo::State::addAccess(
namespace {
-/// A helper containing a list of offsets computed for a Use. Ideally this
-/// list should be strictly ascending, but we ensure that only when we
-/// actually translate the list of offsets to a RangeList.
-struct OffsetInfo {
- using VecTy = SmallVector<int64_t>;
- using const_iterator = VecTy::const_iterator;
- VecTy Offsets;
-
- const_iterator begin() const { return Offsets.begin(); }
- const_iterator end() const { return Offsets.end(); }
-
- bool operator==(const OffsetInfo &RHS) const {
- return Offsets == RHS.Offsets;
- }
-
- bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
-
- void insert(int64_t Offset) { Offsets.push_back(Offset); }
- bool isUnassigned() const { return Offsets.size() == 0; }
-
- bool isUnknown() const {
- if (isUnassigned())
- return false;
- if (Offsets.size() == 1)
- return Offsets.front() == AA::RangeTy::Unknown;
- return false;
- }
-
- void setUnknown() {
- Offsets.clear();
- Offsets.push_back(AA::RangeTy::Unknown);
- }
-
- void addToAll(int64_t Inc) {
- for (auto &Offset : Offsets) {
- Offset += Inc;
- }
- }
-
- /// Copy offsets from \p R into the current list.
- ///
- /// Ideally all lists should be strictly ascending, but we defer that to the
- /// actual use of the list. So we just blindly append here.
- void merge(const OffsetInfo &R) { Offsets.append(R.Offsets); }
-};
-
#ifndef NDEBUG
-static raw_ostream &operator<<(raw_ostream &OS, const OffsetInfo &OI) {
+static raw_ostream &operator<<(raw_ostream &OS,
+ const AAPointerInfo::OffsetInfo &OI) {
ListSeparator LS;
- OS << "[";
+ int I = 0;
for (auto Offset : OI) {
- OS << LS << Offset;
+ OS << LS << "[Offset, Size]: " << Offset << "\n";
+ auto &Origin = OI.Origins[I++];
+ for (auto *Val : Origin)
+ OS << "Origin: " << *Val << "\n";
}
- OS << "]";
+ OS << "\n";
+
return OS;
}
#endif // NDEBUG
@@ -1365,7 +1427,8 @@ struct AAPointerInfoImpl
ChangeStatus translateAndAddStateFromCallee(Attributor &A,
const AAPointerInfo &OtherAA,
- CallBase &CB) {
+ CallBase &CB,
+ OffsetInfoMapTy &OffsetInfoMap) {
using namespace AA::PointerInfo;
if (!OtherAA.getState().isValidState() || !isValidState())
return indicatePessimisticFixpoint();
@@ -1388,15 +1451,16 @@ struct AAPointerInfoImpl
AK = AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
- Changed |= addAccess(A, RAcc.getRanges(), CB, Content, AK,
- RAcc.getType(), RAcc.getRemoteInst());
+ Changed |=
+ addAccess(A, RAcc.getRanges(), CB, Content, AK, RAcc.getType(),
+ OffsetInfoMap, RAcc.getRemoteInst());
}
}
return Changed;
}
ChangeStatus translateAndAddState(Attributor &A, const AAPointerInfo &OtherAA,
- const OffsetInfo &Offsets, CallBase &CB) {
+ const OffsetInfo &Ranges, CallBase &CB) {
using namespace AA::PointerInfo;
if (!OtherAA.getState().isValidState() || !isValidState())
return indicatePessimisticFixpoint();
@@ -1409,16 +1473,16 @@ struct AAPointerInfoImpl
for (const auto &It : State) {
for (auto Index : It.getSecond()) {
const auto &RAcc = State.getAccess(Index);
- for (auto Offset : Offsets) {
- auto NewRanges = Offset == AA::RangeTy::Unknown
+ for (auto Range : Ranges) {
+ auto NewRanges = Range.Offset == AA::RangeTy::Unknown
? AA::RangeTy::getUnknown()
: RAcc.getRanges();
if (!NewRanges.isUnknown()) {
- NewRanges.addToAllOffsets(Offset);
+ NewRanges.addToAllOffsets(Range.Offset);
}
Changed |=
addAccess(A, NewRanges, CB, RAcc.getContent(), RAcc.getKind(),
- RAcc.getType(), RAcc.getRemoteInst());
+ RAcc.getType(), OffsetInfoMap, RAcc.getRemoteInst());
}
}
}
@@ -1449,9 +1513,12 @@ struct AAPointerInfoImpl
else
O << " - c: <unknown>\n";
}
+ Acc.dumpAccessPaths(O);
}
}
}
+
+ OffsetInfoMapTy OffsetInfoMap;
};
struct AAPointerInfoFloating : public AAPointerInfoImpl {
@@ -1462,8 +1529,8 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
/// Deal with an access and signal if it was handled successfully.
bool handleAccess(Attributor &A, Instruction &I,
std::optional<Value *> Content, AccessKind Kind,
- SmallVectorImpl<int64_t> &Offsets, ChangeStatus &Changed,
- Type &Ty) {
+ OffsetInfo &OI, ChangeStatus &Changed, Type &Ty,
+ OffsetInfoMapTy &OffsetInfoMap) {
using namespace AA::PointerInfo;
auto Size = AA::RangeTy::Unknown;
const DataLayout &DL = A.getDataLayout();
@@ -1472,16 +1539,23 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
Size = AccessSize.getFixedValue();
// Make a strictly ascending list of offsets as required by addAccess()
- llvm::sort(Offsets);
- auto *Last = llvm::unique(Offsets);
- Offsets.erase(Last, Offsets.end());
+ SmallVector<int64_t> OffsetsOnly;
+ OI.getOnlyOffsets(OffsetsOnly);
VectorType *VT = dyn_cast<VectorType>(&Ty);
if (!VT || VT->getElementCount().isScalable() ||
!Content.value_or(nullptr) || !isa<Constant>(*Content) ||
(*Content)->getType() != VT ||
DL.getTypeStoreSize(VT->getElementType()).isScalable()) {
- Changed = Changed | addAccess(A, {Offsets, Size}, I, Content, Kind, &Ty);
+ Changed = Changed | addAccess(A, {OffsetsOnly, Size}, I, Content, Kind,
+ &Ty, OffsetInfoMap);
} else {
// Handle vector stores with constant content element-wise.
// TODO: We could look for the elements or create instructions
@@ -1493,7 +1567,9 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
int64_t ElementSize = DL.getTypeStoreSize(ElementType).getFixedValue();
auto *ConstContent = cast<Constant>(*Content);
Type *Int32Ty = Type::getInt32Ty(ElementType->getContext());
- SmallVector<int64_t> ElementOffsets(Offsets.begin(), Offsets.end());
+
+ SmallVector<int64_t> ElementOffsets;
+ OI.getOnlyOffsets(ElementOffsets);
for (int i = 0, e = VT->getElementCount().getFixedValue(); i != e; ++i) {
Value *ElementContent = ConstantExpr::getExtractElement(
@@ -1501,7 +1577,8 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
// Add the element access.
Changed = Changed | addAccess(A, {ElementOffsets, ElementSize}, I,
- ElementContent, Kind, ElementType);
+ ElementContent, Kind, ElementType,
+ OffsetInfoMap);
// Advance the offsets for the next element.
for (auto &ElementOffset : ElementOffsets)
@@ -1520,7 +1597,7 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
/// \return true iff \p UsrOI is updated.
bool collectConstantsForGEP(Attributor &A, const DataLayout &DL,
OffsetInfo &UsrOI, const OffsetInfo &PtrOI,
- const GEPOperator *GEP);
+ GEPOperator *GEP, Value *CurPtr);
/// See AbstractAttribute::trackStatistics()
void trackStatistics() const override {
@@ -1528,11 +1605,9 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
}
};
-bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
- const DataLayout &DL,
- OffsetInfo &UsrOI,
- const OffsetInfo &PtrOI,
- const GEPOperator *GEP) {
+bool AAPointerInfoFloating::collectConstantsForGEP(
+ Attributor &A, const DataLayout &DL, OffsetInfo &UsrOI,
+ const OffsetInfo &PtrOI, GEPOperator *GEP, Value *CurPtr) {
unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
MapVector<Value *, APInt> VariableOffsets;
APInt ConstantOffset(BitWidth, 0);
@@ -1542,7 +1617,7 @@ bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
"determined to be unknown.");
if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
- UsrOI.setUnknown();
+ UsrOI.setUnknown(*CurPtr);
return true;
}
@@ -1551,7 +1626,9 @@ bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
<< *GEP << "\n");
auto Union = PtrOI;
- Union.addToAll(ConstantOffset.getSExtValue());
+ // Clear the origins: we only want to keep the single new predecessor.
+ Union.Origins.clear();
+ Union.addToAll(ConstantOffset.getSExtValue(), *CurPtr);
// Each VI in VariableOffsets has a set of potential constant values. Every
// combination of elements, picked one each from these sets, is separately
@@ -1560,7 +1637,7 @@ bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
auto *PotentialConstantsAA = A.getAAFor<AAPotentialConstantValues>(
*this, IRPosition::value(*VI.first), DepClassTy::OPTIONAL);
if (!PotentialConstantsAA || !PotentialConstantsAA->isValidState()) {
- UsrOI.setUnknown();
+ UsrOI.setUnknown(*CurPtr);
return true;
}
@@ -1579,14 +1656,17 @@ bool AAPointerInfoFloating::collectConstantsForGEP(Attributor &A,
OffsetInfo Product;
for (const auto &ConstOffset : AssumedSet) {
auto CopyPerOffset = Union;
- CopyPerOffset.addToAll(ConstOffset.getSExtValue() *
- VI.second.getZExtValue());
+ CopyPerOffset.addToAll(
+ ConstOffset.getSExtValue() * VI.second.getZExtValue(), *CurPtr);
Product.merge(CopyPerOffset);
}
Union = Product;
}
UsrOI = std::move(Union);
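+ // Record the alloc size of the GEP result element type as the size of
+ // every range.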
+ TypeSize Size = DL.getTypeAllocSize(GEP->getResultElementType());
+ UsrOI.setSizeAll(Size);
+
return true;
}
@@ -1595,9 +1675,27 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
ChangeStatus Changed = ChangeStatus::UNCHANGED;
const DataLayout &DL = A.getDataLayout();
Value &AssociatedValue = getAssociatedValue();
-
- DenseMap<Value *, OffsetInfo> OffsetInfoMap;
- OffsetInfoMap[&AssociatedValue].insert(0);
+ OffsetInfoMap.clear();
+
+ uint64_t Size;
+ Function *F = getAssociatedFunction();
+ TargetLibraryInfo *TLI = nullptr;
+ if (F)
+ TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
+
+ if (TLI && getObjectSize(&AssociatedValue, Size, DL, TLI)) {
+ OffsetInfoMap[&AssociatedValue].insert(AA::RangeTy(0, Size),
+ AssociatedValue);
+ } else if (auto *Glob = dyn_cast<GlobalVariable>(&AssociatedValue)) {
+ TypeSize SizeOfType = DL.getTypeAllocSize(Glob->getValueType());
+ OffsetInfoMap[&AssociatedValue].insert(AA::RangeTy(0, SizeOfType),
+ AssociatedValue);
+ } else {
+ TypeSize SizeOfType = DL.getTypeAllocSize(AssociatedValue.getType());
+ OffsetInfoMap[&AssociatedValue].insert(AA::RangeTy(0, SizeOfType),
+ AssociatedValue);
+ }
auto HandlePassthroughUser = [&](Value *Usr, Value *CurPtr, bool &Follow) {
// One does not simply walk into a map and assign a reference to a possibly
@@ -1616,7 +1714,14 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
auto &PtrOI = OffsetInfoMap[CurPtr];
assert(!PtrOI.isUnassigned() &&
"Cannot pass through if the input Ptr was not visited!");
- UsrOI.merge(PtrOI);
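+ // PHIs and selects can have multiple incoming pointers, so merge the
+ // origins; for other pass-through users, the current pointer becomes
+ // the sole origin.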
+ if (isa<PHINode>(Usr) || isa<SelectInst>(Usr)) {
+ UsrOI.mergeWithOffset(PtrOI, *CurPtr);
+ } else {
+ UsrOI = PtrOI;
+ UsrOI.Origins.clear();
+ UsrOI.addToAll(0, *CurPtr);
+ }
+
Follow = true;
return true;
};
@@ -1624,6 +1729,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
auto UsePred = [&](const Use &U, bool &Follow) -> bool {
Value *CurPtr = U.get();
User *Usr = U.getUser();
+
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in " << *Usr
<< "\n");
assert(OffsetInfoMap.count(CurPtr) &&
@@ -1649,11 +1755,11 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
if (PtrOI.isUnknown()) {
Follow = true;
- UsrOI.setUnknown();
+ UsrOI.setUnknown(*GEP);
return true;
}
- Follow = collectConstantsForGEP(A, DL, UsrOI, PtrOI, GEP);
+ Follow = collectConstantsForGEP(A, DL, UsrOI, PtrOI, GEP, CurPtr);
return true;
}
if (isa<PtrToIntInst>(Usr))
@@ -1677,7 +1783,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand offset unknown "
<< *CurPtr << " in " << *PHI << "\n");
Follow = !UsrOI.isUnknown();
- UsrOI.setUnknown();
+ UsrOI.setUnknown(*CurPtr);
return true;
}
@@ -1686,6 +1792,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
assert(!PtrOI.isUnassigned() &&
"Cannot assign if the current Ptr was not visited!");
LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant (so far)");
+
return true;
}
@@ -1700,7 +1807,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
<< *CurPtr << " in " << *PHI
<< " (base: " << *CurPtrBase << ")\n");
- UsrOI.setUnknown();
+ UsrOI.setUnknown(*CurPtr);
Follow = true;
return true;
}
@@ -1717,7 +1824,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
*PHI->getFunction());
if (mayBeInCycle(CI, cast<Instruction>(Usr), /* HeaderOnly */ true)) {
auto BaseOI = It->getSecond();
- BaseOI.addToAll(Offset.getZExtValue());
+ BaseOI.addToAll(Offset.getZExtValue(), *CurPtr);
if (IsFirstPHIUser || BaseOI == UsrOI) {
LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI is invariant " << *CurPtr
<< " in " << *Usr << "\n");
@@ -1727,12 +1834,12 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
LLVM_DEBUG(
dbgs() << "[AAPointerInfo] PHI operand pointer offset mismatch "
<< *CurPtr << " in " << *PHI << "\n");
- UsrOI.setUnknown();
+ UsrOI.setUnknown(*CurPtr);
Follow = true;
return true;
}
- UsrOI.merge(PtrOI);
+ UsrOI.mergeWithOffset(PtrOI, *CurPtr);
Follow = true;
return true;
}
@@ -1746,8 +1853,8 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
else
AK = AccessKind(AK | AccessKind::AK_MAY);
if (!handleAccess(A, *LoadI, /* Content */ nullptr, AK,
- OffsetInfoMap[CurPtr].Offsets, Changed,
- *LoadI->getType()))
+ OffsetInfoMap[CurPtr], Changed, *LoadI->getType(),
+ OffsetInfoMap))
return false;
auto IsAssumption = [](Instruction &I) {
@@ -1831,9 +1938,9 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
Content =
A.getAssumedSimplified(*Assumption.first, *this,
UsedAssumedInformation, AA::Interprocedural);
- return handleAccess(
- A, *Assumption.second, Content, AccessKind::AK_ASSUMPTION,
- OffsetInfoMap[CurPtr].Offsets, Changed, *LoadI->getType());
+ return handleAccess(A, *Assumption.second, Content,
+ AccessKind::AK_ASSUMPTION, OffsetInfoMap[CurPtr],
+ Changed, *LoadI->getType(), OffsetInfoMap);
}
auto HandleStoreLike = [&](Instruction &I, Value *ValueOp, Type &ValueTy,
@@ -1859,8 +1966,8 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
if (ValueOp)
Content = A.getAssumedSimplified(
*ValueOp, *this, UsedAssumedInformation, AA::Interprocedural);
- return handleAccess(A, I, Content, AK, OffsetInfoMap[CurPtr].Offsets,
- Changed, ValueTy);
+ return handleAccess(A, I, Content, AK, OffsetInfoMap[CurPtr], Changed,
+ ValueTy, OffsetInfoMap);
};
if (auto *StoreI = dyn_cast<StoreInst>(Usr))
@@ -1983,8 +2090,8 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
} else {
auto Kind =
ArgNo == 0 ? AccessKind::AK_MUST_WRITE : AccessKind::AK_MUST_READ;
- Changed =
- Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind, nullptr);
+ Changed = Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind,
+ nullptr, OffsetInfoMap);
}
LLVM_DEBUG({
dbgs() << "Accesses by bin after update:\n";
@@ -2004,8 +2111,8 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
auto *ArgAA =
A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
if (ArgAA && ArgAA->getState().isValidState())
- return translateAndAddStateFromCallee(A, *ArgAA,
- *cast<CallBase>(getCtxI()));
+ return translateAndAddStateFromCallee(
+ A, *ArgAA, *cast<CallBase>(getCtxI()), OffsetInfoMap);
if (!Arg->getParent()->isDeclaration())
return indicatePessimisticFixpoint();
}
@@ -2022,7 +2129,7 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
auto Kind =
ReadOnly ? AccessKind::AK_MAY_READ : AccessKind::AK_MAY_READ_WRITE;
return addAccess(A, AA::RangeTy::getUnknown(), *getCtxI(), nullptr, Kind,
- nullptr);
+ nullptr, OffsetInfoMap);
}
/// See AbstractAttribute::trackStatistics()
diff --git a/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll b/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll
new file mode 100644
index 00000000000000..b7c3f1f33191ef
--- /dev/null
+++ b/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll
@@ -0,0 +1,387 @@
+; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -debug-only=attributor -attributor-annotate-decl-cs -S < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -debug-only=attributor -attributor-annotate-decl-cs -S < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
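+
+; AAPointerInfo records, for each access, the chain(s) of instructions
+; leading from the access back to the underlying object. The CHECK lines
+; below match the debug dump of those access paths.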
+
+
+@globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+
+; CHECK: Accesses by bin after update:
+; CHECK: [8-12] : 1
+; CHECK: - 5 - %1 = load i32, ptr %field22, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %1 = load i32, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 0
+; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [4-5] : 1
+; CHECK: - 9 - store i8 10, ptr %field11, align 4
+; CHECK: - c: i8 10
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i8 10, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 0
+; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [32-36] : 1
+; CHECK: - 9 - store i32 %3, ptr %field8, align 4
+; CHECK: - c: %3 = load i32, ptr %val, align 4
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 %3, ptr %field8, align 4
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [4-8] : 1
+; CHECK: - 5 - %0 = load i32, ptr %field11, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %0 = load i32, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 0
+; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [8-9] : 1
+; CHECK: - 9 - store i8 12, ptr %field22, align 4
+; CHECK: - c: i8 12
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i8 12, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 0
+; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+; CHECK: %f = alloca [10 x i32], align 4
+define dso_local i32 @track_chain(ptr nocapture %val) {
+entry:
+ %f = alloca [10 x i32]
+ %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+ %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+ %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3
+ %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+
+ %field11 = getelementptr i32, ptr %field1, i32 0
+ %field22 = getelementptr i32, ptr %field2, i32 0
+ store i8 10, ptr %field11, align 4
+ store i8 12, ptr %field22, align 4
+
+ %0 = load i32, ptr %field11, align 4
+ %1 = load i32, ptr %field22, align 4
+ %2 = add i32 %0, %1
+
+ %3 = load i32, ptr %val, align 4
+ store i32 %3, ptr %field8, align 4
+
+ %4 = add i32 %3, %2
+
+ ret i32 %4
+}
+
+
+; CHECK: Accesses by bin after update:
+; CHECK: [12-16] : 1
+; CHECK: - 5 - %0 = load i32, ptr %field11, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %0 = load i32, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2
+; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [16-17] : 1
+; CHECK: - 9 - store i8 12, ptr %field22, align 4
+; CHECK: - c: i8 12
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i8 12, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2
+; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [32-36] : 1
+; CHECK: - 9 - store i32 %3, ptr %field8, align 4
+; CHECK: - c: %3 = load i32, ptr %val, align 4
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 %3, ptr %field8, align 4
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [16-20] : 1
+; CHECK: - 5 - %1 = load i32, ptr %field22, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %1 = load i32, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2
+; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [12-13] : 1
+; CHECK: - 9 - store i8 10, ptr %field11, align 4
+; CHECK: - c: i8 10
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i8 10, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2
+; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+; CHECK: %f = alloca [10 x i32], align 4
+define dso_local i32 @track_chain_2(ptr nocapture %val) {
+entry:
+ %f = alloca [10 x i32]
+ %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+ %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+ %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3
+ %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+
+ %field11 = getelementptr i32, ptr %field1, i32 2
+ %field22 = getelementptr i32, ptr %field2, i32 2
+ store i8 10, ptr %field11, align 4
+ store i8 12, ptr %field22, align 4
+
+ %0 = load i32, ptr %field11, align 4
+ %1 = load i32, ptr %field22, align 4
+ %2 = add i32 %0, %1
+
+ %3 = load i32, ptr %val, align 4
+ store i32 %3, ptr %field8, align 4
+
+ %4 = add i32 %3, %2
+
+ ret i32 %4
+}
+
+
+; CHECK: Accesses by bin after update:
+; CHECK: [12-16] : 3
+; CHECK: - 5 - %0 = load i32, ptr %field11, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %0 = load i32, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2
+; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: - 5 - %b = load i32, ptr %field3, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %b = load i32, ptr %field3, align 4
+; CHECK: %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: - 10 - store i32 1000, ptr %6, align 4
+; CHECK: - c: i32 1000
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 1000, ptr %6, align 4
+; CHECK: %6 = select i1 %cond, ptr %field3, ptr %field8
+; CHECK: %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 1000, ptr %6, align 4
+; CHECK: %6 = select i1 %cond, ptr %field3, ptr %field8
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [16-17] : 1
+; CHECK: - 9 - store i8 12, ptr %field22, align 4
+; CHECK: - c: i8 12
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i8 12, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2
+; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [32-36] : 4
+; CHECK: - 9 - store i32 %3, ptr %field8, align 4
+; CHECK: - c: %3 = load i32, ptr %val, align 4
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 %3, ptr %field8, align 4
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: - 5 - %a1 = load i32, ptr %field8, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %a1 = load i32, ptr %field8, align 4
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: - 10 - store i32 1000, ptr %6, align 4
+; CHECK: - c: i32 1000
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 1000, ptr %6, align 4
+; CHECK: %6 = select i1 %cond, ptr %field3, ptr %field8
+; CHECK: %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 1000, ptr %6, align 4
+; CHECK: %6 = select i1 %cond, ptr %field3, ptr %field8
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: - 5 - %8 = load i32, ptr %field8, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %8 = load i32, ptr %field8, align 4
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [16-20] : 1
+; CHECK: - 5 - %1 = load i32, ptr %field22, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %1 = load i32, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2
+; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+; CHECK: %f = alloca [10 x i32], align 4
+; CHECK: [12-13] : 1
+; CHECK: - 9 - store i8 10, ptr %field11, align 4
+; CHECK: - c: i8 10
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i8 10, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2
+; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+; CHECK: %f = alloca [10 x i32], align 4
+define dso_local i32 @track_chain_3(ptr nocapture %val, i1 %cond) {
+entry:
+ %f = alloca [10 x i32]
+ %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+ %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+ %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3
+ %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+
+ %field11 = getelementptr i32, ptr %field1, i32 2
+ %field22 = getelementptr i32, ptr %field2, i32 2
+ store i8 10, ptr %field11, align 4
+ store i8 12, ptr %field22, align 4
+ %0 = load i32, ptr %field11, align 4
+ %1 = load i32, ptr %field22, align 4
+ %2 = add i32 %0, %1
+ %3 = load i32, ptr %val, align 4
+ store i32 %3, ptr %field8, align 4
+ %4 = add i32 %3, %2
+ %5 = load i32, ptr %val
+ %a1 = load i32, ptr %field8
+ %a = add i32 %a1, %5
+ %b = load i32, ptr %field3
+ %6 = select i1 %cond, ptr %field3, ptr %field8
+ store i32 1000, ptr %6
+ %7 = add i32 %4, %b
+ %8 = load i32, ptr %field8
+ %9 = add i32 %8, %7
+ ret i32 %9
+}
+
+; CHECK: Accesses by bin after update:
+; CHECK: [8-12] : 2
+; CHECK: - 9 - store i32 %0, ptr %field2, align 4
+; CHECK: - c: %0 = load i32, ptr %val, align 4
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 %0, ptr %field2, align 4
+; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 2
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: - 6 - %ret = load i32, ptr %x, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %ret = load i32, ptr %x, align 4
+; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ]
+; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 2
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: Backtrack a unique access path:
+; CHECK: %ret = load i32, ptr %x, align 4
+; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ]
+; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: [32-36] : 5
+; CHECK: - 6 - %ret = load i32, ptr %x, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %ret = load i32, ptr %x, align 4
+; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ]
+; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 2
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: Backtrack a unique access path:
+; CHECK: %ret = load i32, ptr %x, align 4
+; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ]
+; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: - 9 - store i32 %1, ptr %field8, align 4
+; CHECK: - c: %1 = load i32, ptr %val2, align 4
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 %1, ptr %field8, align 4
+; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: - 9 - store i32 %0, ptr %field2, align 4
+; CHECK: - c: %0 = load i32, ptr %val, align 4
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 %0, ptr %field2, align 4
+; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 8
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: - 6 - %ret = load i32, ptr %x, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: %ret = load i32, ptr %x, align 4
+; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ]
+; CHECK: %field2 = getelementptr i32, ptr @globalBytes, i32 8
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: Backtrack a unique access path:
+; CHECK: %ret = load i32, ptr %x, align 4
+; CHECK: %x = phi ptr [ %field2, %then ], [ %field8, %else ]
+; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+; CHECK: - 9 - store i32 %1, ptr %field8, align 4
+; CHECK: - c: %1 = load i32, ptr %val2, align 4
+; CHECK: Print all access paths found:
+; CHECK: Backtrack a unique access path:
+; CHECK: store i32 %1, ptr %field8, align 4
+; CHECK: %field8 = getelementptr i32, ptr @globalBytes, i32 8
+; CHECK: @globalBytes = internal global [1024 x i8] zeroinitializer, align 16
+
+define dso_local i32 @phi_different_offsets(ptr nocapture %val, ptr nocapture %val2, i1 %cmp) {
+entry:
+ br i1 %cmp, label %then, label %else
+
+then:
+ %field2 = getelementptr i32, ptr @globalBytes, i32 2
+ %0 = load i32, ptr %val
+ store i32 %0, ptr %field2
+ br label %end
+
+else:
+ %field8 = getelementptr i32, ptr @globalBytes, i32 8
+ %1 = load i32, ptr %val2
+ store i32 %1, ptr %field8
+ br label %end
+
+end:
+ %x = phi ptr [ %field2, %then ], [ %field8, %else ]
+ %ret = load i32, ptr %x
+ ret i32 %ret
+
+}
+
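+; In @phi_same_offsets both incoming pointers of %x are at offset 32 into
+; @globalBytes, so both access paths reach the same offset, in contrast to
+; @phi_different_offsets, where the chains diverge at offsets 8 and 32.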
+define dso_local i32 @phi_same_offsets(ptr nocapture %val, ptr nocapture %val2, i1 %cmp) {
+entry:
+ br i1 %cmp, label %then, label %else
+
+then:
+ %field2 = getelementptr i32, ptr @globalBytes, i32 8
+ %0 = load i32, ptr %val
+ store i32 %0, ptr %field2
+ br label %end
+
+else:
+ %field8 = getelementptr i32, ptr @globalBytes, i32 8
+ %1 = load i32, ptr %val2
+ store i32 %1, ptr %field8
+ br label %end
+
+end:
+ %x = phi ptr [ %field2, %then ], [ %field8, %else ]
+ %ret = load i32, ptr %x
+ ret i32 %ret
+}
\ No newline at end of file