[llvm] [Attributor]: AApointerInfo - store the full chain of instructions that make up the access (PR #96526)
Vidush Singhal via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 26 10:54:03 PDT 2024
https://github.com/vidsinghal updated https://github.com/llvm/llvm-project/pull/96526
>From de229a83ed7865f0ad19da886f973e8baf33d96a Mon Sep 17 00:00:00 2001
From: vidsinghal <vidush.sl at gmail.com>
Date: Mon, 24 Jun 2024 11:00:52 -0400
Subject: [PATCH] Store the full chain of instructions that make up the access.
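Record, for every AAPointerInfo::Access, not just the access-causing
instruction but also the intermediate pointer-computing instructions
(e.g. GEPs) that share an offset with it, and print that chain in the
debug output.

Not part of this patch, just a minimal sketch of how a client that
already holds an AAPointerInfo::Access could consume the new chain
(the helper name dumpAccessChain is illustrative):

  #include "llvm/Support/raw_ostream.h"
  #include "llvm/Transforms/IPO/Attributor.h"

  // Walk the chain recorded for a single access and print every
  // instruction that participates in it; the first element is the
  // access-causing instruction itself.
  static void dumpAccessChain(const llvm::AAPointerInfo::Access &Acc,
                              llvm::raw_ostream &OS) {
    for (const llvm::Instruction *I : Acc.getAccessChain())
      OS << *I << "\n";
  }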
---
llvm/include/llvm/Transforms/IPO/Attributor.h | 111 +++++++++++++++-
.../Transforms/IPO/AttributorAttributes.cpp | 109 ++++++---------
.../pointer-info-track-access-chain.ll | 125 ++++++++++++++++++
3 files changed, 274 insertions(+), 71 deletions(-)
create mode 100644 llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll
diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
index 6ba04dbc31db3..3e6d21ef1407a 100644
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -5784,6 +5784,58 @@ struct AAPointerInfo : public AbstractAttribute {
AK_MUST_READ_WRITE = AK_MUST | AK_R | AK_W,
};
+ /// A helper containing a list of offsets computed for a Use. Ideally this
+ /// list should be strictly ascending, but we ensure that only when we
+ /// actually translate the list of offsets to a RangeList.
+ struct OffsetInfo {
+ using VecTy = SmallVector<int64_t>;
+ using const_iterator = VecTy::const_iterator;
+ VecTy Offsets;
+
+ const_iterator begin() const { return Offsets.begin(); }
+ const_iterator end() const { return Offsets.end(); }
+
+ bool operator==(const OffsetInfo &RHS) const {
+ return Offsets == RHS.Offsets;
+ }
+
+ bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
+
+ void insert(int64_t Offset) { Offsets.push_back(Offset); }
+ bool isUnassigned() const { return Offsets.empty(); }
+
+ bool isUnknown() const {
+ if (isUnassigned())
+ return false;
+ if (Offsets.size() == 1)
+ return Offsets.front() == AA::RangeTy::Unknown;
+ return false;
+ }
+
+ void setUnknown() {
+ Offsets.clear();
+ Offsets.push_back(AA::RangeTy::Unknown);
+ }
+
+ void addToAll(int64_t Inc) {
+ for (auto &Offset : Offsets)
+ Offset += Inc;
+ }
+
+ /// Copy offsets from \p R into the current list.
+ ///
+ /// Unlike a plain append, the combined list is sorted and deduplicated,
+ /// e.g. merging {8, 0} into {8, 16} yields {0, 8, 16}.
+ void merge(const OffsetInfo &R) {
+ Offsets.append(R.Offsets);
+ // Keep the combined offsets sorted and free of duplicates.
+ sort(Offsets.begin(), Offsets.end());
+ Offsets.erase(std::unique(Offsets.begin(), Offsets.end()), Offsets.end());
+ }
+ };
+
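+ /// Maps each Value seen while walking the uses of the associated pointer
+ /// to the offsets (relative to that pointer) it has been found to denote.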
+ using OffsetInfoMapTy = DenseMap<Value *, OffsetInfo>;
+
/// A container for a list of ranges.
struct RangeList {
// The set of ranges rarely contains more than one element, and is unlikely
@@ -5938,13 +5990,16 @@ struct AAPointerInfo : public AbstractAttribute {
/// An access description.
struct Access {
Access(Instruction *I, int64_t Offset, int64_t Size,
- std::optional<Value *> Content, AccessKind Kind, Type *Ty)
+ std::optional<Value *> Content, AccessKind Kind, Type *Ty,
+ OffsetInfoMapTy &OffsetInfoMap)
: LocalI(I), RemoteI(I), Content(Content), Ranges(Offset, Size),
Kind(Kind), Ty(Ty) {
verify();
+ addIntermediateInstruction(OffsetInfoMap);
}
Access(Instruction *LocalI, Instruction *RemoteI, const RangeList &Ranges,
- std::optional<Value *> Content, AccessKind K, Type *Ty)
+ std::optional<Value *> Content, AccessKind K, Type *Ty,
+ OffsetInfoMapTy &OffsetInfoMap)
: LocalI(LocalI), RemoteI(RemoteI), Content(Content), Ranges(Ranges),
Kind(K), Ty(Ty) {
if (Ranges.size() > 1) {
@@ -5952,13 +6007,15 @@ struct AAPointerInfo : public AbstractAttribute {
Kind = AccessKind(Kind & ~AK_MUST);
}
verify();
+ addIntermediateInstruction(OffsetInfoMap);
}
Access(Instruction *LocalI, Instruction *RemoteI, int64_t Offset,
int64_t Size, std::optional<Value *> Content, AccessKind Kind,
- Type *Ty)
+ Type *Ty, OffsetInfoMapTy &OffsetInfoMap)
: LocalI(LocalI), RemoteI(RemoteI), Content(Content),
Ranges(Offset, Size), Kind(Kind), Ty(Ty) {
verify();
+ addIntermediateInstruction(OffsetInfoMap);
}
Access(const Access &Other) = default;
@@ -6077,6 +6134,51 @@ struct AAPointerInfo : public AbstractAttribute {
}
}
+ // Collect the full chain of instructions that make up this access, i.e.,
+ // the access-causing instruction plus the intermediate pointer computations.
+ void addIntermediateInstruction(OffsetInfoMapTy &OffsetInfoMap) {
+
+ SmallVector<Instruction *> ReadyList;
+ DenseMap<Instruction *, bool> Visited;
+ ReadyList.push_back(LocalI);
+ CompleteAccessChain.push_back(LocalI);
+
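+ // The chain starts with the access-causing instruction itself; the
+ // remaining entries are appended in map iteration order, so no particular
+ // def-use order is guaranteed.
+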
+ // TODO: If an entry does not share an offset with LocalI but can still
+ // reach it, should it also be part of the intermediate instruction list?
+ for (auto &Entry : OffsetInfoMap) {
+
+ Instruction *IntermediateInstruction =
+ dyn_cast<Instruction>(Entry.getFirst());
+ if (!IntermediateInstruction)
+ continue;
+
+ OffsetInfo &Info = Entry.getSecond();
+ auto &OffsetVec = Info.Offsets;
+
+ // Check whether the intermediate instruction shares at least one offset
+ // with the access-causing local instruction. If so, consider it part of
+ // the chain of instructions that make up the access.
+ bool HasAtLeastOneSameOffset = false;
+ for (auto *It = begin(); It != end(); It++) {
+ int64_t RangeOffset = It->Offset;
+ for (auto Offset : OffsetVec) {
+ if (RangeOffset == Offset) {
+ HasAtLeastOneSameOffset = true;
+ break;
+ }
+ }
+ }
+
+ if (HasAtLeastOneSameOffset)
+ CompleteAccessChain.push_back(IntermediateInstruction);
+ }
+ }
+
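+ /// Return the recorded chain: the access-causing instruction followed by
+ /// the intermediate instructions that share an offset with it.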
+ const SmallVector<Instruction *> &getAccessChain() const {
+ return CompleteAccessChain;
+ }
+
const RangeList &getRanges() const { return Ranges; }
using const_iterator = RangeList::const_iterator;
@@ -6104,6 +6206,9 @@ struct AAPointerInfo : public AbstractAttribute {
/// The type of the content, thus the type read/written, can be null if not
/// available.
Type *Ty;
+
+ /// The full chain of instructions that participate in the Access.
+ SmallVector<Instruction *> CompleteAccessChain;
};
/// Create an abstract attribute view for the position \p IRP.
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index c4b9375a53a27..e575fda1990c8 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -850,6 +850,7 @@ struct AA::PointerInfo::State : public AbstractState {
ChangeStatus addAccess(Attributor &A, const AAPointerInfo::RangeList &Ranges,
Instruction &I, std::optional<Value *> Content,
AAPointerInfo::AccessKind Kind, Type *Ty,
+ AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap,
Instruction *RemoteI = nullptr);
AAPointerInfo::const_bin_iterator begin() const { return OffsetBins.begin(); }
@@ -929,7 +930,7 @@ struct AA::PointerInfo::State : public AbstractState {
ChangeStatus AA::PointerInfo::State::addAccess(
Attributor &A, const AAPointerInfo::RangeList &Ranges, Instruction &I,
std::optional<Value *> Content, AAPointerInfo::AccessKind Kind, Type *Ty,
- Instruction *RemoteI) {
+ AAPointerInfo::OffsetInfoMapTy &OffsetInfoMap, Instruction *RemoteI) {
RemoteI = RemoteI ? RemoteI : &I;
// Check if we have an access for this instruction, if not, simply add it.
@@ -956,7 +957,8 @@ ChangeStatus AA::PointerInfo::State::addAccess(
};
if (!AccExists) {
- AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty);
+ AccessList.emplace_back(&I, RemoteI, Ranges, Content, Kind, Ty,
+ OffsetInfoMap);
assert((AccessList.size() == AccIndex + 1) &&
"New Access should have been at AccIndex");
LocalList.push_back(AccIndex);
@@ -966,7 +968,8 @@ ChangeStatus AA::PointerInfo::State::addAccess(
// Combine the new Access with the existing Access, and then update the
// mapping in the offset bins.
- AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty);
+ AAPointerInfo::Access Acc(&I, RemoteI, Ranges, Content, Kind, Ty,
+ OffsetInfoMap);
auto &Current = AccessList[AccIndex];
auto Before = Current;
Current &= Acc;
@@ -1002,54 +1005,9 @@ ChangeStatus AA::PointerInfo::State::addAccess(
namespace {
-/// A helper containing a list of offsets computed for a Use. Ideally this
-/// list should be strictly ascending, but we ensure that only when we
-/// actually translate the list of offsets to a RangeList.
-struct OffsetInfo {
- using VecTy = SmallVector<int64_t>;
- using const_iterator = VecTy::const_iterator;
- VecTy Offsets;
-
- const_iterator begin() const { return Offsets.begin(); }
- const_iterator end() const { return Offsets.end(); }
-
- bool operator==(const OffsetInfo &RHS) const {
- return Offsets == RHS.Offsets;
- }
-
- bool operator!=(const OffsetInfo &RHS) const { return !(*this == RHS); }
-
- void insert(int64_t Offset) { Offsets.push_back(Offset); }
- bool isUnassigned() const { return Offsets.size() == 0; }
-
- bool isUnknown() const {
- if (isUnassigned())
- return false;
- if (Offsets.size() == 1)
- return Offsets.front() == AA::RangeTy::Unknown;
- return false;
- }
-
- void setUnknown() {
- Offsets.clear();
- Offsets.push_back(AA::RangeTy::Unknown);
- }
-
- void addToAll(int64_t Inc) {
- for (auto &Offset : Offsets) {
- Offset += Inc;
- }
- }
-
- /// Copy offsets from \p R into the current list.
- ///
- /// Ideally all lists should be strictly ascending, but we defer that to the
- /// actual use of the list. So we just blindly append here.
- void merge(const OffsetInfo &R) { Offsets.append(R.Offsets); }
-};
-
#ifndef NDEBUG
-static raw_ostream &operator<<(raw_ostream &OS, const OffsetInfo &OI) {
+static raw_ostream &operator<<(raw_ostream &OS,
+ const AAPointerInfo::OffsetInfo &OI) {
ListSeparator LS;
OS << "[";
for (auto Offset : OI) {
@@ -1365,7 +1323,8 @@ struct AAPointerInfoImpl
ChangeStatus translateAndAddStateFromCallee(Attributor &A,
const AAPointerInfo &OtherAA,
- CallBase &CB) {
+ CallBase &CB,
+ OffsetInfoMapTy &OffsetInfoMap) {
using namespace AA::PointerInfo;
if (!OtherAA.getState().isValidState() || !isValidState())
return indicatePessimisticFixpoint();
@@ -1388,8 +1347,9 @@ struct AAPointerInfoImpl
AK = AccessKind(AK & (IsByval ? AccessKind::AK_R : AccessKind::AK_RW));
AK = AccessKind(AK | (RAcc.isMayAccess() ? AK_MAY : AK_MUST));
- Changed |= addAccess(A, RAcc.getRanges(), CB, Content, AK,
- RAcc.getType(), RAcc.getRemoteInst());
+ Changed |=
+ addAccess(A, RAcc.getRanges(), CB, Content, AK, RAcc.getType(),
+ OffsetInfoMap, RAcc.getRemoteInst());
}
}
return Changed;
@@ -1418,7 +1378,7 @@ struct AAPointerInfoImpl
}
Changed |=
addAccess(A, NewRanges, CB, RAcc.getContent(), RAcc.getKind(),
- RAcc.getType(), RAcc.getRemoteInst());
+ RAcc.getType(), OffsetInfoMap, RAcc.getRemoteInst());
}
}
}
@@ -1449,9 +1409,18 @@ struct AAPointerInfoImpl
else
O << " - c: <unknown>\n";
}
+
+ // Print the full chain of instructions that make up the access.
+ O << "Print the full access chain:\n";
+ const auto &AccessChain = Acc.getAccessChain();
+ for (auto *Ins : AccessChain) {
+ O << " " << *Ins << "\n";
+ }
}
}
}
+
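+ /// Offsets computed for the values in the use chain of the associated
+ /// pointer; cleared and repopulated on every update.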
+ OffsetInfoMapTy OffsetInfoMap;
};
struct AAPointerInfoFloating : public AAPointerInfoImpl {
@@ -1463,7 +1432,7 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
bool handleAccess(Attributor &A, Instruction &I,
std::optional<Value *> Content, AccessKind Kind,
SmallVectorImpl<int64_t> &Offsets, ChangeStatus &Changed,
- Type &Ty) {
+ Type &Ty, OffsetInfoMapTy &OffsetInfoMap) {
using namespace AA::PointerInfo;
auto Size = AA::RangeTy::Unknown;
const DataLayout &DL = A.getDataLayout();
@@ -1481,7 +1450,8 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
!Content.value_or(nullptr) || !isa<Constant>(*Content) ||
(*Content)->getType() != VT ||
DL.getTypeStoreSize(VT->getElementType()).isScalable()) {
- Changed = Changed | addAccess(A, {Offsets, Size}, I, Content, Kind, &Ty);
+ Changed = Changed | addAccess(A, {Offsets, Size}, I, Content, Kind, &Ty,
+ OffsetInfoMap);
} else {
// Handle vector stores with constant content element-wise.
// TODO: We could look for the elements or create instructions
@@ -1501,7 +1471,8 @@ struct AAPointerInfoFloating : public AAPointerInfoImpl {
// Add the element access.
Changed = Changed | addAccess(A, {ElementOffsets, ElementSize}, I,
- ElementContent, Kind, ElementType);
+ ElementContent, Kind, ElementType,
+ OffsetInfoMap);
// Advance the offsets for the next element.
for (auto &ElementOffset : ElementOffsets)
@@ -1596,7 +1567,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
const DataLayout &DL = A.getDataLayout();
Value &AssociatedValue = getAssociatedValue();
- DenseMap<Value *, OffsetInfo> OffsetInfoMap;
+ OffsetInfoMap.clear();
OffsetInfoMap[&AssociatedValue].insert(0);
auto HandlePassthroughUser = [&](Value *Usr, Value *CurPtr, bool &Follow) {
@@ -1631,6 +1602,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
auto UsePred = [&](const Use &U, bool &Follow) -> bool {
Value *CurPtr = U.get();
User *Usr = U.getUser();
+
LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in " << *Usr
<< "\n");
assert(OffsetInfoMap.count(CurPtr) &&
@@ -1750,7 +1722,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
AK = AccessKind(AK | AccessKind::AK_MAY);
if (!handleAccess(A, *LoadI, /* Content */ nullptr, AK,
OffsetInfoMap[CurPtr].Offsets, Changed,
- *LoadI->getType()))
+ *LoadI->getType(), OffsetInfoMap))
return false;
auto IsAssumption = [](Instruction &I) {
@@ -1834,9 +1806,10 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
Content =
A.getAssumedSimplified(*Assumption.first, *this,
UsedAssumedInformation, AA::Interprocedural);
- return handleAccess(
- A, *Assumption.second, Content, AccessKind::AK_ASSUMPTION,
- OffsetInfoMap[CurPtr].Offsets, Changed, *LoadI->getType());
+ return handleAccess(A, *Assumption.second, Content,
+ AccessKind::AK_ASSUMPTION,
+ OffsetInfoMap[CurPtr].Offsets, Changed,
+ *LoadI->getType(), OffsetInfoMap);
}
auto HandleStoreLike = [&](Instruction &I, Value *ValueOp, Type &ValueTy,
@@ -1863,7 +1836,7 @@ ChangeStatus AAPointerInfoFloating::updateImpl(Attributor &A) {
Content = A.getAssumedSimplified(
*ValueOp, *this, UsedAssumedInformation, AA::Interprocedural);
return handleAccess(A, I, Content, AK, OffsetInfoMap[CurPtr].Offsets,
- Changed, ValueTy);
+ Changed, ValueTy, OffsetInfoMap);
};
if (auto *StoreI = dyn_cast<StoreInst>(Usr))
@@ -1984,8 +1957,8 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
} else {
auto Kind =
ArgNo == 0 ? AccessKind::AK_MUST_WRITE : AccessKind::AK_MUST_READ;
- Changed =
- Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind, nullptr);
+ Changed = Changed | addAccess(A, {0, LengthVal}, *MI, nullptr, Kind,
+ nullptr, OffsetInfoMap);
}
LLVM_DEBUG({
dbgs() << "Accesses by bin after update:\n";
@@ -2005,8 +1978,8 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
auto *ArgAA =
A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
if (ArgAA && ArgAA->getState().isValidState())
- return translateAndAddStateFromCallee(A, *ArgAA,
- *cast<CallBase>(getCtxI()));
+ return translateAndAddStateFromCallee(
+ A, *ArgAA, *cast<CallBase>(getCtxI()), OffsetInfoMap);
if (!Arg->getParent()->isDeclaration())
return indicatePessimisticFixpoint();
}
@@ -2023,7 +1996,7 @@ struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
auto Kind =
ReadOnly ? AccessKind::AK_MAY_READ : AccessKind::AK_MAY_READ_WRITE;
return addAccess(A, AA::RangeTy::getUnknown(), *getCtxI(), nullptr, Kind,
- nullptr);
+ nullptr, OffsetInfoMap);
}
/// See AbstractAttribute::trackStatistics()
diff --git a/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll b/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll
new file mode 100644
index 0000000000000..ded4d7640aa40
--- /dev/null
+++ b/llvm/test/Transforms/Attributor/pointer-info-track-access-chain.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals --version 2
+; RUN: opt -aa-pipeline=basic-aa -passes=attributor -debug-only=attributor -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s 2>&1 | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -debug-only=attributor -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: Accesses by bin after update:
+; CHECK: [8-12] : 1
+; CHECK: - 5 - %1 = load i32, ptr %field22, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print the full access chain:
+; CHECK: %1 = load i32, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 0
+; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+; CHECK: [4-5] : 1
+; CHECK: - 9 - store i8 10, ptr %field11, align 4
+; CHECK: - c: i8 10
+; CHECK: Print the full access chain:
+; CHECK: store i8 10, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 0
+; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+; CHECK: [32-36] : 1
+; CHECK: - 9 - store i32 %3, ptr %field8, align 4
+; CHECK: - c: %3 = load i32, ptr %val, align 4
+; CHECK: Print the full access chain:
+; CHECK: store i32 %3, ptr %field8, align 4
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: [4-8] : 1
+; CHECK: - 5 - %0 = load i32, ptr %field11, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print the full access chain:
+; CHECK: %0 = load i32, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 0
+; CHECK: %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+; CHECK: [8-9] : 1
+; CHECK: - 9 - store i8 12, ptr %field22, align 4
+; CHECK: - c: i8 12
+; CHECK: Print the full access chain:
+; CHECK: store i8 12, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 0
+; CHECK: %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+define dso_local i32 @track_chain(ptr nocapture %val) #0 {
+entry:
+ %f = alloca [10 x i32]
+ %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+ %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+ %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3
+ %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+
+ %field11 = getelementptr i32, ptr %field1, i32 0
+ %field22 = getelementptr i32, ptr %field2, i32 0
+ store i8 10, ptr %field11, align 4
+ store i8 12, ptr %field22, align 4
+
+ %0 = load i32, ptr %field11, align 4
+ %1 = load i32, ptr %field22, align 4
+ %2 = add i32 %0, %1
+
+ %3 = load i32, ptr %val, align 4
+ store i32 %3, ptr %field8, align 4
+
+ %4 = add i32 %3, %2
+
+ ret i32 %4
+}
+
+; TODO: Should %field11 be backtracked further in this case?
+; It currently is not because, for example, the offsets of the load
+; at [12-16] differ from those of the %field1 GEP. But we could still
+; record it since it is a pointer operand, i.e., it comes from a GEP.
+; It could also be a function argument (a pointer passed to the function).
+; CHECK: Accesses by bin after update:
+; CHECK: [12-16] : 1
+; CHECK: - 5 - %0 = load i32, ptr %field11, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print the full access chain:
+; CHECK: %0 = load i32, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2
+; CHECK: [16-17] : 1
+; CHECK: - 9 - store i8 12, ptr %field22, align 4
+; CHECK: - c: i8 12
+; CHECK: Print the full access chain:
+; CHECK: store i8 12, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2
+; CHECK: [32-36] : 1
+; CHECK: - 9 - store i32 %3, ptr %field8, align 4
+; CHECK: - c: %3 = load i32, ptr %val, align 4
+; CHECK: Print the full access chain:
+; CHECK: store i32 %3, ptr %field8, align 4
+; CHECK: %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+; CHECK: [16-20] : 1
+; CHECK: - 5 - %1 = load i32, ptr %field22, align 4
+; CHECK: - c: <unknown>
+; CHECK: Print the full access chain:
+; CHECK: %1 = load i32, ptr %field22, align 4
+; CHECK: %field22 = getelementptr i32, ptr %field2, i32 2
+; CHECK: [12-13] : 1
+; CHECK: - 9 - store i8 10, ptr %field11, align 4
+; CHECK: - c: i8 10
+; CHECK: Print the full access chain:
+; CHECK: store i8 10, ptr %field11, align 4
+; CHECK: %field11 = getelementptr i32, ptr %field1, i32 2
+define dso_local i32 @track_chain_2(ptr nocapture %val) #0 {
+entry:
+ %f = alloca [10 x i32]
+ %field1 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 1
+ %field2 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 2
+ %field3 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 3
+ %field8 = getelementptr inbounds [10 x i32], ptr %f, i32 0, i32 8
+
+ %field11 = getelementptr i32, ptr %field1, i32 2
+ %field22 = getelementptr i32, ptr %field2, i32 2
+ store i8 10, ptr %field11, align 4
+ store i8 12, ptr %field22, align 4
+
+ %0 = load i32, ptr %field11, align 4
+ %1 = load i32, ptr %field22, align 4
+ %2 = add i32 %0, %1
+
+ %3 = load i32, ptr %val, align 4
+ store i32 %3, ptr %field8, align 4
+
+ %4 = add i32 %3, %2
+
+ ret i32 %4
+}