[lld] bf535ac - [ELF][AArch64] Support R_AARCH64_{CALL26,JUMP26} range extension thunks with addends
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 2 10:07:34 PST 2019
Author: Fangrui Song
Date: 2019-12-02T10:07:24-08:00
New Revision: bf535ac4a28268e6a481acee5a794ef5deed5018
URL: https://github.com/llvm/llvm-project/commit/bf535ac4a28268e6a481acee5a794ef5deed5018
DIFF: https://github.com/llvm/llvm-project/commit/bf535ac4a28268e6a481acee5a794ef5deed5018.diff
LOG: [ELF][AArch64] Support R_AARCH64_{CALL26,JUMP26} range extension thunks with addends
Fixes the AArch64 part of PR40438
The current range extension thunk framework does not handle a relocation
relative to an STT_SECTION symbol with a non-zero addend. Such relocations
may be used by jumps/calls to local functions on some RELA targets
(AArch64, powerpc64, etc.). See PR40438 and the following code for an
example:
// clang -target $target a.cc
// .text.cold may be placed in a separate output section.
// The distance between bar in .text.cold and foo in .text may be larger than 128MiB.
static void foo() {}
__attribute__((section(".text.cold"))) static int bar() { foo(); return 0; }
__attribute__((used)) static int dummy = bar();
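
To make the scenario concrete, here is a minimal, standalone C++ sketch of
why the addend must be folded into the destination before the range check.
The addresses, helper names, and exact range margins are hypothetical
stand-ins, not lld's real API (the real check lives in AArch64::needsThunk
and inBranchRange in lld/ELF/Arch/AArch64.cpp):

#include <cstdint>
#include <cstdio>

// R_AARCH64_CALL26/JUMP26 encode a 26-bit signed word offset, i.e. roughly a
// +/-128MiB branch range (lld's real check applies small extra margins).
constexpr int64_t kBranchRange = 128 * 1024 * 1024;

static bool inBranchRange(uint64_t src, uint64_t dst) {
  int64_t offset = static_cast<int64_t>(dst - src);
  return offset >= -kBranchRange && offset < kBranchRange;
}

// A call lowered to "STT_SECTION symbol + addend" targets sectionVA + addend,
// so the addend has to participate in the range check.
static bool needsThunk(uint64_t branchAddr, uint64_t sectionVA, int64_t addend) {
  return !inBranchRange(branchAddr, sectionVA + addend);
}

int main() {
  uint64_t branchAddr = 0x1000;                    // bl site in .text.cold
  uint64_t sectionVA = 0x1000 + 127 * 1024 * 1024; // VA of the .text section symbol
  int64_t addend = 2 * 1024 * 1024;                // offset of foo within .text
  // Ignoring the addend, the section start looks reachable and no thunk would
  // be created; the actual target (section start + addend) is out of range.
  printf("without addend: %d  with addend: %d\n",
         needsThunk(branchAddr, sectionVA, 0),
         needsThunk(branchAddr, sectionVA, addend));
}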
This patch makes such thunks with addends work for AArch64. The
target-independent part can be reused by PPC in the future.
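
As a rough, self-contained model of the new target-independent bookkeeping:
thunks are now looked up by (symbol, addend) rather than by symbol alone. The
names below are simplified stand-ins; the real code in lld/ELF/Relocations.h
uses llvm::DenseMap with nested std::pair keys because DenseMapInfo is not
specialized for std::tuple:

#include <cstdint>
#include <map>
#include <tuple>
#include <vector>

struct SectionBase {}; // stand-in for lld's SectionBase
struct Thunk {};       // stand-in for lld's Thunk

// The addend is kept as a separate key component because a relocation may
// later be reverted to its original non-thunk target, which needs the
// original addend back, so offset and addend cannot be folded together.
using Key = std::tuple<SectionBase *, uint64_t, int64_t>;
std::map<Key, std::vector<Thunk *>> thunkedSymbolsBySectionAndAddend;

std::vector<Thunk *> &getThunkSlot(SectionBase *sec, uint64_t offset,
                                   int64_t addend) {
  return thunkedSymbolsBySectionAndAddend[{sec, offset, addend}];
}

int main() {
  SectionBase text;
  getThunkSlot(&text, 0, 0); // e.g. bl high_target  (addend 0)
  getThunkSlot(&text, 0, 8); // e.g. bl .text_high+8 (addend 8)
  // Two distinct slots: different addends get different thunks.
  return thunkedSymbolsBySectionAndAddend.size() == 2 ? 0 : 1;
}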
On REL targets (ARM, MIPS), jumps/calls are not represented as
STT_SECTION + non-zero addend (see
MCELFObjectTargetWriter::needsRelocateWithSymbol), so they don't need
this feature, but we need to make sure this patch does not affect them.
Reviewed By: peter.smith
Differential Revision: https://reviews.llvm.org/D70637
Added:
Modified:
lld/ELF/Arch/AArch64.cpp
lld/ELF/Arch/ARM.cpp
lld/ELF/Arch/Mips.cpp
lld/ELF/Arch/PPC.cpp
lld/ELF/Arch/PPC64.cpp
lld/ELF/Relocations.cpp
lld/ELF/Relocations.h
lld/ELF/Target.cpp
lld/ELF/Target.h
lld/ELF/Thunks.cpp
lld/ELF/Thunks.h
lld/test/ELF/aarch64-thunk-pi.s
lld/test/ELF/aarch64-thunk-script.s
Removed:
################################################################################
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index 5cf07029fa1d..4e80e3d78f16 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -40,7 +40,8 @@ class AArch64 : public TargetInfo {
void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
int32_t index, unsigned relOff) const override;
bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const override;
+ uint64_t branchAddr, const Symbol &s,
+ int64_t a) const override;
uint32_t getThunkSectionSpacing() const override;
bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
bool usesOnlyLowPageBits(RelType type) const override;
@@ -230,13 +231,14 @@ void AArch64::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
}
bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const {
+ uint64_t branchAddr, const Symbol &s,
+ int64_t a) const {
// ELF for the ARM 64-bit architecture, section Call and Jump relocations
// only permits range extension thunks for R_AARCH64_CALL26 and
// R_AARCH64_JUMP26 relocation types.
if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26)
return false;
- uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
+ uint64_t dst = expr == R_PLT_PC ? s.getPltVA() : s.getVA(a);
return !inBranchRange(type, branchAddr, dst);
}
diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index 41baea496d36..0f522d324ff7 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -39,7 +39,8 @@ class ARM final : public TargetInfo {
void addPltSymbols(InputSection &isec, uint64_t off) const override;
void addPltHeaderSymbols(InputSection &isd) const override;
bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const override;
+ uint64_t branchAddr, const Symbol &s,
+ int64_t a) const override;
uint32_t getThunkSectionSpacing() const override;
bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
@@ -262,7 +263,7 @@ void ARM::addPltSymbols(InputSection &isec, uint64_t off) const {
}
bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const {
+ uint64_t branchAddr, const Symbol &s, int64_t /*a*/) const {
// If S is an undefined weak symbol and does not have a PLT entry then it
// will be resolved as a branch to the next instruction.
if (s.isUndefWeak() && !s.isInPlt())
diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp
index 74c0b59ecd5b..317b22ec264c 100644
--- a/lld/ELF/Arch/Mips.cpp
+++ b/lld/ELF/Arch/Mips.cpp
@@ -35,7 +35,8 @@ template <class ELFT> class MIPS final : public TargetInfo {
void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
int32_t index, unsigned relOff) const override;
bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const override;
+ uint64_t branchAddr, const Symbol &s,
+ int64_t a) const override;
void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
bool usesOnlyLowPageBits(RelType type) const override;
};
@@ -356,7 +357,8 @@ void MIPS<ELFT>::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
template <class ELFT>
bool MIPS<ELFT>::needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const {
+ uint64_t branchAddr, const Symbol &s,
+ int64_t /*a*/) const {
// Any MIPS PIC code function is invoked with its address in register $t9.
// So if we have a branch instruction from non-PIC code to the PIC one
// we cannot make the jump directly and need to create a small stubs
diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp
index c4eecb9a29c2..b0d93c6ce9b5 100644
--- a/lld/ELF/Arch/PPC.cpp
+++ b/lld/ELF/Arch/PPC.cpp
@@ -37,7 +37,8 @@ class PPC final : public TargetInfo {
}
void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
bool needsThunk(RelExpr expr, RelType relocType, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const override;
+ uint64_t branchAddr, const Symbol &s,
+ int64_t a) const override;
uint32_t getThunkSectionSpacing() const override;
bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
@@ -169,7 +170,7 @@ void PPC::writeGotPlt(uint8_t *buf, const Symbol &s) const {
}
bool PPC::needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const {
+ uint64_t branchAddr, const Symbol &s, int64_t /*a*/) const {
if (type != R_PPC_REL24 && type != R_PPC_PLTREL24)
return false;
if (s.isInPlt())
diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index 6299fd8a5243..ed16974af867 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -205,7 +205,8 @@ class PPC64 final : public TargetInfo {
void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
void writeGotHeader(uint8_t *buf) const override;
bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const override;
+ uint64_t branchAddr, const Symbol &s,
+ int64_t a) const override;
uint32_t getThunkSectionSpacing() const override;
bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
@@ -898,7 +899,7 @@ void PPC64::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
}
bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const {
+ uint64_t branchAddr, const Symbol &s, int64_t /*a*/) const {
if (type != R_PPC64_REL14 && type != R_PPC64_REL24)
return false;
diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index 60ea1119aaf6..ea30662d3824 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -1779,14 +1779,19 @@ static int64_t getPCBias(RelType type) {
std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
Relocation &rel, uint64_t src) {
std::vector<Thunk *> *thunkVec = nullptr;
+ int64_t addend = rel.addend + getPCBias(rel.type);
- // We use (section, offset) pair to find the thunk position if possible so
- // that we create only one thunk for aliased symbols or ICFed sections.
+ // We use a ((section, offset), addend) pair to find the thunk position if
+ // possible so that we create only one thunk for aliased symbols or ICFed
+ // sections. There may be multiple relocations sharing the same (section,
+ // offset + addend) pair. We may revert the relocation back to its original
+ // non-Thunk target, so we cannot fold offset + addend.
if (auto *d = dyn_cast<Defined>(rel.sym))
if (!d->isInPlt() && d->section)
- thunkVec = &thunkedSymbolsBySection[{d->section->repl, d->value}];
+ thunkVec = &thunkedSymbolsBySectionAndAddend[{
+ {d->section->repl, d->value}, addend}];
if (!thunkVec)
- thunkVec = &thunkedSymbols[rel.sym];
+ thunkVec = &thunkedSymbols[{rel.sym, addend}];
// Check existing Thunks for Sym to see if they can be reused
for (Thunk *t : *thunkVec)
@@ -1813,6 +1818,9 @@ bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
rel.sym->getVA(rel.addend) + getPCBias(rel.type)))
return true;
rel.sym = &t->destination;
+ // TODO Restore addend on all targets.
+ if (config->emachine == EM_AARCH64)
+ rel.addend = t->addend;
if (rel.sym->isInPlt())
rel.expr = toPlt(rel.expr);
}
@@ -1868,7 +1876,7 @@ bool ThunkCreator::createThunks(ArrayRef<OutputSection *> outputSections) {
continue;
if (!target->needsThunk(rel.expr, rel.type, isec->file, src,
- *rel.sym))
+ *rel.sym, rel.addend))
continue;
Thunk *t;
@@ -1890,9 +1898,13 @@ bool ThunkCreator::createThunks(ArrayRef<OutputSection *> outputSections) {
rel.sym = t->getThunkTargetSym();
rel.expr = fromPlt(rel.expr);
+ // On AArch64, a jump/call relocation may be encoded as STT_SECTION
+ // + non-zero addend, clear the addend after redirection.
+ //
// The addend of R_PPC_PLTREL24 should be ignored after changing to
// R_PC.
- if (config->emachine == EM_PPC && rel.type == R_PPC_PLTREL24)
+ if (config->emachine == EM_AARCH64 ||
+ (config->emachine == EM_PPC && rel.type == R_PPC_PLTREL24))
rel.addend = 0;
}
diff --git a/lld/ELF/Relocations.h b/lld/ELF/Relocations.h
index befe15b8f3b9..060c55e3086d 100644
--- a/lld/ELF/Relocations.h
+++ b/lld/ELF/Relocations.h
@@ -150,10 +150,17 @@ class ThunkCreator {
bool normalizeExistingThunk(Relocation &rel, uint64_t src);
- // Record all the available Thunks for a Symbol
- llvm::DenseMap<std::pair<SectionBase *, uint64_t>, std::vector<Thunk *>>
- thunkedSymbolsBySection;
- llvm::DenseMap<Symbol *, std::vector<Thunk *>> thunkedSymbols;
+ // Record all the available Thunks for a (Symbol, addend) pair, where Symbol
+ // is represented as a (section, offset) pair. There may be multiple
+ // relocations sharing the same (section, offset + addend) pair. We may revert
+ // a relocation back to its original non-Thunk target, and restore the
+ // original addend, so we cannot fold offset + addend. A nested pair is used
+ // because DenseMapInfo is not specialized for std::tuple.
+ llvm::DenseMap<std::pair<std::pair<SectionBase *, uint64_t>, int64_t>,
+ std::vector<Thunk *>>
+ thunkedSymbolsBySectionAndAddend;
+ llvm::DenseMap<std::pair<Symbol *, int64_t>, std::vector<Thunk *>>
+ thunkedSymbols;
// Find a Thunk from the Thunks symbol definition, we can use this to find
// the Thunk from a relocation to the Thunks symbol definition.
diff --git a/lld/ELF/Target.cpp b/lld/ELF/Target.cpp
index 024e0cfec27b..e1e99556ec7b 100644
--- a/lld/ELF/Target.cpp
+++ b/lld/ELF/Target.cpp
@@ -130,7 +130,8 @@ int64_t TargetInfo::getImplicitAddend(const uint8_t *buf, RelType type) const {
bool TargetInfo::usesOnlyLowPageBits(RelType type) const { return false; }
bool TargetInfo::needsThunk(RelExpr expr, RelType type, const InputFile *file,
- uint64_t branchAddr, const Symbol &s) const {
+ uint64_t branchAddr, const Symbol &s,
+ int64_t a) const {
return false;
}
diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h
index 39b999176717..9d147ed7b1f3 100644
--- a/lld/ELF/Target.h
+++ b/lld/ELF/Target.h
@@ -58,7 +58,7 @@ class TargetInfo {
// targeting S.
virtual bool needsThunk(RelExpr expr, RelType relocType,
const InputFile *file, uint64_t branchAddr,
- const Symbol &s) const;
+ const Symbol &s, int64_t a) const;
// On systems with range extensions we place collections of Thunks at
// regular spacings that enable the majority of branches reach the Thunks.
diff --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp
index d13517786043..8d2cdba616a6 100644
--- a/lld/ELF/Thunks.cpp
+++ b/lld/ELF/Thunks.cpp
@@ -49,7 +49,7 @@ namespace {
// AArch64 long range Thunks
class AArch64ABSLongThunk final : public Thunk {
public:
- AArch64ABSLongThunk(Symbol &dest) : Thunk(dest) {}
+ AArch64ABSLongThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {}
uint32_t size() override { return 16; }
void writeTo(uint8_t *buf) override;
void addSymbols(ThunkSection &isec) override;
@@ -57,7 +57,7 @@ class AArch64ABSLongThunk final : public Thunk {
class AArch64ADRPThunk final : public Thunk {
public:
- AArch64ADRPThunk(Symbol &dest) : Thunk(dest) {}
+ AArch64ADRPThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {}
uint32_t size() override { return 12; }
void writeTo(uint8_t *buf) override;
void addSymbols(ThunkSection &isec) override;
@@ -73,7 +73,7 @@ class AArch64ADRPThunk final : public Thunk {
// if the target is in range, otherwise it creates a long thunk.
class ARMThunk : public Thunk {
public:
- ARMThunk(Symbol &dest) : Thunk(dest) {}
+ ARMThunk(Symbol &dest) : Thunk(dest, 0) {}
bool getMayUseShortThunk();
uint32_t size() override { return getMayUseShortThunk() ? 4 : sizeLong(); }
@@ -103,7 +103,7 @@ class ARMThunk : public Thunk {
// which has a range of 16MB.
class ThumbThunk : public Thunk {
public:
- ThumbThunk(Symbol &dest) : Thunk(dest) { alignment = 2; }
+ ThumbThunk(Symbol &dest) : Thunk(dest, 0) { alignment = 2; }
bool getMayUseShortThunk();
uint32_t size() override { return getMayUseShortThunk() ? 4 : sizeLong(); }
@@ -209,7 +209,7 @@ class ThumbV6MPILongThunk final : public ThumbThunk {
// MIPS LA25 thunk
class MipsThunk final : public Thunk {
public:
- MipsThunk(Symbol &dest) : Thunk(dest) {}
+ MipsThunk(Symbol &dest) : Thunk(dest, 0) {}
uint32_t size() override { return 16; }
void writeTo(uint8_t *buf) override;
@@ -220,7 +220,7 @@ class MipsThunk final : public Thunk {
// microMIPS R2-R5 LA25 thunk
class MicroMipsThunk final : public Thunk {
public:
- MicroMipsThunk(Symbol &dest) : Thunk(dest) {}
+ MicroMipsThunk(Symbol &dest) : Thunk(dest, 0) {}
uint32_t size() override { return 14; }
void writeTo(uint8_t *buf) override;
@@ -231,7 +231,7 @@ class MicroMipsThunk final : public Thunk {
// microMIPS R6 LA25 thunk
class MicroMipsR6Thunk final : public Thunk {
public:
- MicroMipsR6Thunk(Symbol &dest) : Thunk(dest) {}
+ MicroMipsR6Thunk(Symbol &dest) : Thunk(dest, 0) {}
uint32_t size() override { return 12; }
void writeTo(uint8_t *buf) override;
@@ -241,8 +241,11 @@ class MicroMipsR6Thunk final : public Thunk {
class PPC32PltCallStub final : public Thunk {
public:
- PPC32PltCallStub(const InputSection &isec, const Relocation &rel, Symbol &dest)
- : Thunk(dest), addend(rel.type == R_PPC_PLTREL24 ? rel.addend : 0),
+ // For R_PPC_PLTREL24, Thunk::addend records the addend which will be used to
+ // decide the offsets in the call stub.
+ PPC32PltCallStub(const InputSection &isec, const Relocation &rel,
+ Symbol &dest)
+ : Thunk(dest, rel.type == R_PPC_PLTREL24 ? rel.addend : 0),
file(isec.file) {}
uint32_t size() override { return 16; }
void writeTo(uint8_t *buf) override;
@@ -250,10 +253,6 @@ class PPC32PltCallStub final : public Thunk {
bool isCompatibleWith(const InputSection &isec, const Relocation &rel) const override;
private:
- // For R_PPC_PLTREL24, this records the addend, which will be used to decide
- // the offsets in the call stub.
- uint32_t addend;
-
// Records the call site of the call stub.
const InputFile *file;
};
@@ -268,7 +267,7 @@ class PPC32PltCallStub final : public Thunk {
// 3) Transferring control to the target function through an indirect branch.
class PPC64PltCallStub final : public Thunk {
public:
- PPC64PltCallStub(Symbol &dest) : Thunk(dest) {}
+ PPC64PltCallStub(Symbol &dest) : Thunk(dest, 0) {}
uint32_t size() override { return 20; }
void writeTo(uint8_t *buf) override;
void addSymbols(ThunkSection &isec) override;
@@ -289,7 +288,7 @@ class PPC64LongBranchThunk : public Thunk {
void addSymbols(ThunkSection &isec) override;
protected:
- PPC64LongBranchThunk(Symbol &dest) : Thunk(dest) {}
+ PPC64LongBranchThunk(Symbol &dest) : Thunk(dest, 0) {}
};
class PPC64PILongBranchThunk final : public PPC64LongBranchThunk {
@@ -332,8 +331,8 @@ void Thunk::setOffset(uint64_t newOffset) {
// AArch64 long range Thunks
-static uint64_t getAArch64ThunkDestVA(const Symbol &s) {
- uint64_t v = s.isInPlt() ? s.getPltVA() : s.getVA();
+static uint64_t getAArch64ThunkDestVA(const Symbol &s, int64_t a) {
+ uint64_t v = s.isInPlt() ? s.getPltVA() : s.getVA(a);
return v;
}
@@ -344,7 +343,7 @@ void AArch64ABSLongThunk::writeTo(uint8_t *buf) {
0x00, 0x00, 0x00, 0x00, // L0: .xword S
0x00, 0x00, 0x00, 0x00,
};
- uint64_t s = getAArch64ThunkDestVA(destination);
+ uint64_t s = getAArch64ThunkDestVA(destination, addend);
memcpy(buf, data, sizeof(data));
target->relocateOne(buf + 8, R_AARCH64_ABS64, s);
}
@@ -367,7 +366,7 @@ void AArch64ADRPThunk::writeTo(uint8_t *buf) {
0x10, 0x02, 0x00, 0x91, // add x16, x16, R_AARCH64_ADD_ABS_LO12_NC(Dest)
0x00, 0x02, 0x1f, 0xd6, // br x16
};
- uint64_t s = getAArch64ThunkDestVA(destination);
+ uint64_t s = getAArch64ThunkDestVA(destination, addend);
uint64_t p = getThunkTargetSym()->getVA();
memcpy(buf, data, sizeof(data));
target->relocateOne(buf, R_AARCH64_ADR_PREL_PG_HI21,
@@ -795,16 +794,16 @@ void PPC64LongBranchThunk::addSymbols(ThunkSection &isec) {
isec);
}
-Thunk::Thunk(Symbol &d) : destination(d), offset(0) {}
+Thunk::Thunk(Symbol &d, int64_t a) : destination(d), addend(a), offset(0) {}
Thunk::~Thunk() = default;
-static Thunk *addThunkAArch64(RelType type, Symbol &s) {
+static Thunk *addThunkAArch64(RelType type, Symbol &s, int64_t a) {
if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26)
fatal("unrecognized relocation type");
if (config->picThunk)
- return make<AArch64ADRPThunk>(s);
- return make<AArch64ABSLongThunk>(s);
+ return make<AArch64ADRPThunk>(s, a);
+ return make<AArch64ABSLongThunk>(s, a);
}
// Creates a thunk for Thumb-ARM interworking.
@@ -895,7 +894,8 @@ static Thunk *addThunkMips(RelType type, Symbol &s) {
return make<MipsThunk>(s);
}
-static Thunk *addThunkPPC32(const InputSection &isec, const Relocation &rel, Symbol &s) {
+static Thunk *addThunkPPC32(const InputSection &isec, const Relocation &rel,
+ Symbol &s) {
assert((rel.type == R_PPC_REL24 || rel.type == R_PPC_PLTREL24) &&
"unexpected relocation type for thunk");
return make<PPC32PltCallStub>(isec, rel, s);
@@ -914,9 +914,10 @@ static Thunk *addThunkPPC64(RelType type, Symbol &s) {
Thunk *addThunk(const InputSection &isec, Relocation &rel) {
Symbol &s = *rel.sym;
+ int64_t a = rel.addend;
if (config->emachine == EM_AARCH64)
- return addThunkAArch64(rel.type, s);
+ return addThunkAArch64(rel.type, s, a);
if (config->emachine == EM_ARM)
return addThunkArm(rel.type, s);
diff --git a/lld/ELF/Thunks.h b/lld/ELF/Thunks.h
index 2d27ee5f6c38..891bf8e5e434 100644
--- a/lld/ELF/Thunks.h
+++ b/lld/ELF/Thunks.h
@@ -27,7 +27,7 @@ class ThunkSection;
// Thunks are assigned to synthetic ThunkSections
class Thunk {
public:
- Thunk(Symbol &destination);
+ Thunk(Symbol &destination, int64_t addend);
virtual ~Thunk();
virtual uint32_t size() = 0;
@@ -55,11 +55,12 @@ class Thunk {
Defined *getThunkTargetSym() const { return syms[0]; }
- // The alignment requirement for this Thunk, defaults to the size of the
- // typical code section alignment.
Symbol &destination;
+ int64_t addend;
llvm::SmallVector<Defined *, 3> syms;
uint64_t offset = 0;
+ // The alignment requirement for this Thunk, defaults to the size of the
+ // typical code section alignment.
uint32_t alignment = 4;
};
diff --git a/lld/test/ELF/aarch64-thunk-pi.s b/lld/test/ELF/aarch64-thunk-pi.s
index 965a93764a73..2545f8fb2ea1 100644
--- a/lld/test/ELF/aarch64-thunk-pi.s
+++ b/lld/test/ELF/aarch64-thunk-pi.s
@@ -16,28 +16,36 @@ low_target:
bl high_target
ret
// CHECK: low_target:
-// CHECK-NEXT: d8: bl #0x10 <__AArch64ADRPThunk_high_target>
+// CHECK-NEXT: d8: bl #0x18 <__AArch64ADRPThunk_high_target>
// CHECK-NEXT: ret
.hidden low_target2
.globl low_target2
.type low_target2, %function
low_target2:
- // Need thunk to high_target
+ // Need thunk to high_target2
bl high_target2
+ // .text_high+8 = high_target2
+ bl .text_high+8
ret
// CHECK: low_target2:
-// CHECK-NEXT: e0: bl #0x14 <__AArch64ADRPThunk_high_target2>
+// CHECK-NEXT: e0: bl #0x1c <__AArch64ADRPThunk_high_target2>
+// CHECK-NEXT: e4: bl #0x24 <__AArch64ADRPThunk_>
// CHECK-NEXT: ret
// Expect range extension thunks for .text_low
// adrp calculation is (PC + signed immediate) & (!0xfff)
// CHECK: __AArch64ADRPThunk_high_target:
-// CHECK-NEXT: e8: adrp x16, #0x10000000
+// CHECK-NEXT: f0: adrp x16, #0x10000000
// CHECK-NEXT: add x16, x16, #0x40
// CHECK-NEXT: br x16
// CHECK: __AArch64ADRPThunk_high_target2:
-// CHECK-NEXT: f4: adrp x16, #0x10000000
+// CHECK-NEXT: fc: adrp x16, #0x10000000
+// CHECK-NEXT: add x16, x16, #0x8
+// CHECK-NEXT: br x16
+/// Identical to the previous one, but for the target .text_high+8.
+// CHECK: __AArch64ADRPThunk_:
+// CHECK-NEXT: 108: adrp x16, #0x10000000
// CHECK-NEXT: add x16, x16, #0x8
// CHECK-NEXT: br x16
diff --git a/lld/test/ELF/aarch64-thunk-script.s b/lld/test/ELF/aarch64-thunk-script.s
index cf8187dd5bb3..176c137223b2 100644
--- a/lld/test/ELF/aarch64-thunk-script.s
+++ b/lld/test/ELF/aarch64-thunk-script.s
@@ -15,6 +15,8 @@
_start:
// Need thunk to high_target at plt
bl high_target
+ // Need thunk to .text_high+4
+ bl .text_high+4
ret
.section .text_high, "ax", %progbits
@@ -28,14 +30,21 @@ high_target:
// CHECK: Disassembly of section .text_low:
// CHECK-EMPTY:
// CHECK-NEXT: _start:
-// CHECK-NEXT: 2000: bl #0x8 <__AArch64AbsLongThunk_high_target>
+// CHECK-NEXT: 2000: bl #0x10 <__AArch64AbsLongThunk_high_target>
+// CHECK-NEXT: 2004: bl #0x1c <__AArch64AbsLongThunk_>
// CHECK-NEXT: ret
// CHECK: __AArch64AbsLongThunk_high_target:
-// CHECK-NEXT: 2008: ldr x16, #0x8
+// CHECK-NEXT: 2010: ldr x16, #0x8
// CHECK-NEXT: br x16
// CHECK: $d:
-// CHECK-NEXT: 2010: 00 20 00 08 .word 0x08002000
-// CHECK-NEXT: 2014: 00 00 00 00 .word 0x00000000
+// CHECK-NEXT: 2018: 00 20 00 08 .word 0x08002000
+// CHECK-NEXT: 201c: 00 00 00 00 .word 0x00000000
+// CHECK: __AArch64AbsLongThunk_:
+// CHECK-NEXT: 2020: ldr x16, #0x8
+// CHECK-NEXT: 2024: br x16
+// CHECK: $d:
+// CHECK-NEXT: 2028: 04 20 00 08 .word 0x08002004
+// CHECK-NEXT: 202c: 00 00 00 00 .word 0x00000000
// CHECK: Disassembly of section .text_high:
// CHECK-EMPTY:
// CHECK-NEXT: high_target: