[lld] b4feb26 - [ELF] Move target to Ctx. NFC
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 21 23:53:41 PDT 2024
Author: Fangrui Song
Date: 2024-08-21T23:53:36-07:00
New Revision: b4feb26606de84ff53d9b65a3b79c00a2b4d7c22
URL: https://github.com/llvm/llvm-project/commit/b4feb26606de84ff53d9b65a3b79c00a2b4d7c22
DIFF: https://github.com/llvm/llvm-project/commit/b4feb26606de84ff53d9b65a3b79c00a2b4d7c22.diff
LOG: [ELF] Move target to Ctx. NFC
Ctx was introduced in March 2022 as a more suitable place for such
singletons.
Follow-up to the earlier moves of `driver` (2022-10) and `script` (2024-08) into Ctx.
Added:
Modified:
lld/ELF/AArch64ErrataFix.cpp
lld/ELF/ARMErrataFix.cpp
lld/ELF/Arch/AArch64.cpp
lld/ELF/Arch/ARM.cpp
lld/ELF/Arch/LoongArch.cpp
lld/ELF/Arch/PPC64.cpp
lld/ELF/Arch/RISCV.cpp
lld/ELF/Config.h
lld/ELF/Driver.cpp
lld/ELF/InputSection.cpp
lld/ELF/LinkerScript.cpp
lld/ELF/MarkLive.cpp
lld/ELF/OutputSections.cpp
lld/ELF/Relocations.cpp
lld/ELF/ScriptParser.cpp
lld/ELF/Symbols.cpp
lld/ELF/SyntheticSections.cpp
lld/ELF/Target.cpp
lld/ELF/Target.h
lld/ELF/Thunks.cpp
lld/ELF/Writer.cpp
Removed:
################################################################################
diff --git a/lld/ELF/AArch64ErrataFix.cpp b/lld/ELF/AArch64ErrataFix.cpp
index 6ca0a3038714b5..7a1477ebb79ad0 100644
--- a/lld/ELF/AArch64ErrataFix.cpp
+++ b/lld/ELF/AArch64ErrataFix.cpp
@@ -413,12 +413,12 @@ void Patch843419Section::writeTo(uint8_t *buf) {
write32le(buf, read32le(patchee->content().begin() + patcheeOffset));
// Apply any relocation transferred from the original patchee section.
- target->relocateAlloc(*this, buf);
+ ctx.target->relocateAlloc(*this, buf);
// Return address is the next instruction after the one we have just copied.
uint64_t s = getLDSTAddr() + 4;
uint64_t p = patchSym->getVA() + 4;
- target->relocateNoSym(buf + 4, R_AARCH64_JUMP26, s - p);
+ ctx.target->relocateNoSym(buf + 4, R_AARCH64_JUMP26, s - p);
}
void AArch64Err843419Patcher::init() {
@@ -483,7 +483,8 @@ void AArch64Err843419Patcher::insertPatches(
InputSectionDescription &isd, std::vector<Patch843419Section *> &patches) {
uint64_t isecLimit;
uint64_t prevIsecLimit = isd.sections.front()->outSecOff;
- uint64_t patchUpperBound = prevIsecLimit + target->getThunkSectionSpacing();
+ uint64_t patchUpperBound =
+ prevIsecLimit + ctx.target->getThunkSectionSpacing();
uint64_t outSecAddr = isd.sections.front()->getParent()->addr;
// Set the outSecOff of patches to the place where we want to insert them.
@@ -500,7 +501,7 @@ void AArch64Err843419Patcher::insertPatches(
(*patchIt)->outSecOff = prevIsecLimit;
++patchIt;
}
- patchUpperBound = prevIsecLimit + target->getThunkSectionSpacing();
+ patchUpperBound = prevIsecLimit + ctx.target->getThunkSectionSpacing();
}
prevIsecLimit = isecLimit;
}
diff --git a/lld/ELF/ARMErrataFix.cpp b/lld/ELF/ARMErrataFix.cpp
index 9fb791f4848a36..7068344a73b955 100644
--- a/lld/ELF/ARMErrataFix.cpp
+++ b/lld/ELF/ARMErrataFix.cpp
@@ -157,11 +157,11 @@ static uint64_t getThumbDestAddr(uint64_t sourceAddr, uint32_t instr) {
write16le(buf + 2, instr & 0x0000ffff);
int64_t offset;
if (isBcc(instr))
- offset = target->getImplicitAddend(buf, R_ARM_THM_JUMP19);
+ offset = ctx.target->getImplicitAddend(buf, R_ARM_THM_JUMP19);
else if (isB(instr))
- offset = target->getImplicitAddend(buf, R_ARM_THM_JUMP24);
+ offset = ctx.target->getImplicitAddend(buf, R_ARM_THM_JUMP24);
else
- offset = target->getImplicitAddend(buf, R_ARM_THM_CALL);
+ offset = ctx.target->getImplicitAddend(buf, R_ARM_THM_CALL);
// A BLX instruction from Thumb to Arm may have an address that is
// not 4-byte aligned. As Arm instructions are always 4-byte aligned
// the instruction is calculated (from Arm ARM):
@@ -182,7 +182,7 @@ void Patch657417Section::writeTo(uint8_t *buf) {
write32le(buf, 0x9000f000);
// If we have a relocation then apply it.
if (!relocs().empty()) {
- target->relocateAlloc(*this, buf);
+ ctx.target->relocateAlloc(*this, buf);
return;
}
@@ -197,7 +197,8 @@ void Patch657417Section::writeTo(uint8_t *buf) {
// state with a PC Bias of 4.
uint64_t pcBias = isBLX(instr) ? 8 : 4;
uint64_t p = getVA(pcBias);
- target->relocateNoSym(buf, isARM ? R_ARM_JUMP24 : R_ARM_THM_JUMP24, s - p);
+ ctx.target->relocateNoSym(buf, isARM ? R_ARM_JUMP24 : R_ARM_THM_JUMP24,
+ s - p);
}
// Given a branch instruction spanning two 4KiB regions, at offset off from the
@@ -233,7 +234,7 @@ static bool patchInRange(const InputSection *isec, uint64_t off,
// after isec. As there can be more than one patch in the patch section we
// add 0x100 as contingency to account for worst case of 1 branch every 4KiB
// for a 1 MiB range.
- return target->inBranchRange(
+ return ctx.target->inBranchRange(
isBcc(instr) ? R_ARM_THM_JUMP19 : R_ARM_THM_JUMP24, isec->getVA(off),
isec->getVA() + isec->getSize() + 0x100);
}
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index 0106349b2d2778..75d85d14bd62c3 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -784,7 +784,7 @@ bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel,
write32le(buf + adrpRel.offset, 0xd503201f);
// adr x_<dest_reg>
write32le(buf + adrRel.offset, 0x10000000 | adrpDestReg);
- target->relocate(buf + adrRel.offset, adrRel, val);
+ ctx.target->relocate(buf + adrRel.offset, adrRel, val);
return true;
}
@@ -854,11 +854,13 @@ bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
// add x_<dest reg>, x_<dest reg>
write32le(buf + addRel.offset, 0x91000000 | adrpDestReg | (adrpDestReg << 5));
- target->relocate(buf + adrpSymRel.offset, adrpSymRel,
- SignExtend64(getAArch64Page(sym.getVA()) -
- getAArch64Page(secAddr + adrpSymRel.offset),
- 64));
- target->relocate(buf + addRel.offset, addRel, SignExtend64(sym.getVA(), 64));
+ ctx.target->relocate(
+ buf + adrpSymRel.offset, adrpSymRel,
+ SignExtend64(getAArch64Page(sym.getVA()) -
+ getAArch64Page(secAddr + adrpSymRel.offset),
+ 64));
+ ctx.target->relocate(buf + addRel.offset, addRel,
+ SignExtend64(sym.getVA(), 64));
tryRelaxAdrpAdd(adrpSymRel, addRel, secAddr, buf);
return true;
}
diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index 07a7535c4a231d..827ba3a6c68a14 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -1386,9 +1386,9 @@ void ArmCmseSGSection::writeTo(uint8_t *buf) {
write16(p + 2, 0xe97f);
write16(p + 4, 0xf000); // B.W S
write16(p + 6, 0xb000);
- target->relocateNoSym(p + 4, R_ARM_THM_JUMP24,
- s->acleSeSym->getVA() -
- (getVA() + s->offset + s->size));
+ ctx.target->relocateNoSym(p + 4, R_ARM_THM_JUMP24,
+ s->acleSeSym->getVA() -
+ (getVA() + s->offset + s->size));
}
}
diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index 68defb750c538e..01e42a5867b7ef 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -349,7 +349,8 @@ void LoongArch::writePltHeader(uint8_t *buf) const {
write32le(buf + 0, insn(PCADDU12I, R_T2, hi20(offset), 0));
write32le(buf + 4, insn(sub, R_T1, R_T1, R_T3));
write32le(buf + 8, insn(ld, R_T3, R_T2, lo12(offset)));
- write32le(buf + 12, insn(addi, R_T1, R_T1, lo12(-target->pltHeaderSize - 12)));
+ write32le(buf + 12,
+ insn(addi, R_T1, R_T1, lo12(-ctx.target->pltHeaderSize - 12)));
write32le(buf + 16, insn(addi, R_T0, R_T2, lo12(offset)));
write32le(buf + 20, insn(srli, R_T1, R_T1, config->is64 ? 1 : 2));
write32le(buf + 24, insn(ld, R_T0, R_T0, config->wordsize));
@@ -374,8 +375,8 @@ void LoongArch::writePlt(uint8_t *buf, const Symbol &sym,
}
RelType LoongArch::getDynRel(RelType type) const {
- return type == target->symbolicRel ? type
- : static_cast<RelType>(R_LARCH_NONE);
+ return type == ctx.target->symbolicRel ? type
+ : static_cast<RelType>(R_LARCH_NONE);
}
RelExpr LoongArch::getRelExpr(const RelType type, const Symbol &s,
diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index 753ced698a05c0..15abbfda664331 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -409,8 +409,8 @@ static bool tryRelaxPPC64TocIndirection(const Relocation &rel,
return false;
// Add PPC64TocOffset that will be subtracted by PPC64::relocate().
- static_cast<const PPC64 &>(*target).relaxGot(bufLoc, rel,
- tocRelative + ppc64TocOffset);
+ static_cast<const PPC64 &>(*ctx.target)
+ .relaxGot(bufLoc, rel, tocRelative + ppc64TocOffset);
return true;
}
diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index dc9e541d5d8bef..2435864ce5a7f0 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -235,7 +235,7 @@ void RISCV::writePltHeader(uint8_t *buf) const {
write32le(buf + 0, utype(AUIPC, X_T2, hi20(offset)));
write32le(buf + 4, rtype(SUB, X_T1, X_T1, X_T3));
write32le(buf + 8, itype(load, X_T3, X_T2, lo12(offset)));
- write32le(buf + 12, itype(ADDI, X_T1, X_T1, -target->pltHeaderSize - 12));
+ write32le(buf + 12, itype(ADDI, X_T1, X_T1, -ctx.target->pltHeaderSize - 12));
write32le(buf + 16, itype(ADDI, X_T0, X_T2, lo12(offset)));
write32le(buf + 20, itype(SRLI, X_T1, X_T1, config->is64 ? 1 : 2));
write32le(buf + 24, itype(load, X_T0, X_T0, config->wordsize));
@@ -256,8 +256,8 @@ void RISCV::writePlt(uint8_t *buf, const Symbol &sym,
}
RelType RISCV::getDynRel(RelType type) const {
- return type == target->symbolicRel ? type
- : static_cast<RelType>(R_RISCV_NONE);
+ return type == ctx.target->symbolicRel ? type
+ : static_cast<RelType>(R_RISCV_NONE);
}
RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s,
diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h
index 5987edee0e93e7..fd40ec9805aa2b 100644
--- a/lld/ELF/Config.h
+++ b/lld/ELF/Config.h
@@ -47,6 +47,7 @@ class Symbol;
class BitcodeCompiler;
class OutputSection;
class LinkerScript;
+class TargetInfo;
struct Partition;
struct PhdrEntry;
@@ -485,6 +486,7 @@ struct DuplicateSymbol {
struct Ctx {
LinkerDriver driver;
LinkerScript *script;
+ TargetInfo *target;
// These variables are initialized by Writer and should not be used before
// Writer is initialized.
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 308fd86c29ba12..37460a7a6c8eb4 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -94,6 +94,7 @@ void elf::errorOrWarn(const Twine &msg) {
void Ctx::reset() {
driver = LinkerDriver();
script = nullptr;
+ target = nullptr;
bufferStart = nullptr;
mainPart = nullptr;
@@ -2065,13 +2066,13 @@ void LinkerDriver::inferMachineType() {
// each target.
static uint64_t getMaxPageSize(opt::InputArgList &args) {
uint64_t val = args::getZOptionValue(args, OPT_z, "max-page-size",
- target->defaultMaxPageSize);
+ ctx.target->defaultMaxPageSize);
if (!isPowerOf2_64(val)) {
error("max-page-size: value isn't a power of 2");
- return target->defaultMaxPageSize;
+ return ctx.target->defaultMaxPageSize;
}
if (config->nmagic || config->omagic) {
- if (val != target->defaultMaxPageSize)
+ if (val != ctx.target->defaultMaxPageSize)
warn("-z max-page-size set, but paging disabled by omagic or nmagic");
return 1;
}
@@ -2082,13 +2083,13 @@ static uint64_t getMaxPageSize(opt::InputArgList &args) {
// each target.
static uint64_t getCommonPageSize(opt::InputArgList &args) {
uint64_t val = args::getZOptionValue(args, OPT_z, "common-page-size",
- target->defaultCommonPageSize);
+ ctx.target->defaultCommonPageSize);
if (!isPowerOf2_64(val)) {
error("common-page-size: value isn't a power of 2");
- return target->defaultCommonPageSize;
+ return ctx.target->defaultCommonPageSize;
}
if (config->nmagic || config->omagic) {
- if (val != target->defaultCommonPageSize)
+ if (val != ctx.target->defaultCommonPageSize)
warn("-z common-page-size set, but paging disabled by omagic or nmagic");
return 1;
}
@@ -3106,9 +3107,9 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &args) {
// The Target instance handles target-specific stuff, such as applying
// relocations or writing a PLT section. It also contains target-dependent
// values such as a default image base address.
- target = getTarget();
+ ctx.target = getTarget();
- config->eflags = target->calcEFlags();
+ config->eflags = ctx.target->calcEFlags();
// maxPageSize (sometimes called abi page size) is the maximum page size that
// the output can be run on. For example if the OS can use 4k or 64k page
// sizes then maxPageSize must be 64k for the output to be useable on both.
diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index fd3e947428388b..03b91804c81543 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -434,7 +434,7 @@ void InputSection::copyRelocations(uint8_t *buf) {
template <class ELFT, class RelTy, class RelIt>
void InputSection::copyRelocations(uint8_t *buf,
llvm::iterator_range<RelIt> rels) {
- const TargetInfo &target = *elf::target;
+ const TargetInfo &target = *elf::ctx.target;
InputSectionBase *sec = getRelocatedSection();
(void)sec->contentMaybeDecompress(); // uncompress if needed
@@ -950,7 +950,7 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
template <class ELFT, class RelTy>
void InputSection::relocateNonAlloc(uint8_t *buf, Relocs<RelTy> rels) {
const unsigned bits = sizeof(typename ELFT::uint) * 8;
- const TargetInfo &target = *elf::target;
+ const TargetInfo &target = *elf::ctx.target;
const auto emachine = config->emachine;
const bool isDebug = isDebugSection(*this);
const bool isDebugLine = isDebug && name == ".debug_line";
@@ -1103,7 +1103,7 @@ void InputSectionBase::relocate(uint8_t *buf, uint8_t *bufEnd) {
adjustSplitStackFunctionPrologues<ELFT>(buf, bufEnd);
if (flags & SHF_ALLOC) {
- target->relocateAlloc(*this, buf);
+ ctx.target->relocateAlloc(*this, buf);
return;
}
@@ -1198,8 +1198,8 @@ void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf,
if (Defined *f = getEnclosingFunction(rel.offset)) {
prologues.insert(f);
- if (target->adjustPrologueForCrossSplitStack(buf + f->value, end,
- f->stOther))
+ if (ctx.target->adjustPrologueForCrossSplitStack(buf + f->value, end,
+ f->stOther))
continue;
if (!getFile<ELFT>()->someNoSplitStack)
error(lld::toString(this) + ": " + f->getName() +
@@ -1208,7 +1208,7 @@ void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf,
}
}
- if (target->needsMoreStackNonSplit)
+ if (ctx.target->needsMoreStackNonSplit)
switchMorestackCallsToMorestackNonSplit(prologues, morestackCalls);
}
diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp
index 9ddda99d90f02d..8bab26cd3b0f07 100644
--- a/lld/ELF/LinkerScript.cpp
+++ b/lld/ELF/LinkerScript.cpp
@@ -1487,7 +1487,7 @@ LinkerScript::assignAddresses() {
dot = config->imageBase.value_or(0);
} else {
// Assign addresses to headers right now.
- dot = target->getImageBase();
+ dot = ctx.target->getImageBase();
ctx.out.elfHeader->addr = dot;
ctx.out.programHeaders->addr = dot + ctx.out.elfHeader->size;
dot += getHeaderSize();
diff --git a/lld/ELF/MarkLive.cpp b/lld/ELF/MarkLive.cpp
index b2558a20ba1a78..56ff53fc89bddf 100644
--- a/lld/ELF/MarkLive.cpp
+++ b/lld/ELF/MarkLive.cpp
@@ -75,8 +75,8 @@ template <class ELFT> class MarkLive {
template <class ELFT>
static uint64_t getAddend(InputSectionBase &sec,
const typename ELFT::Rel &rel) {
- return target->getImplicitAddend(sec.content().begin() + rel.r_offset,
- rel.getType(config->isMips64EL));
+ return ctx.target->getImplicitAddend(sec.content().begin() + rel.r_offset,
+ rel.getType(config->isMips64EL));
}
template <class ELFT>
diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp
index c076f442558fac..cb17e107d6dae2 100644
--- a/lld/ELF/OutputSections.cpp
+++ b/lld/ELF/OutputSections.cpp
@@ -278,7 +278,7 @@ static void nopInstrFill(uint8_t *buf, size_t size) {
unsigned i = 0;
if (size == 0)
return;
- std::vector<std::vector<uint8_t>> nopFiller = *target->nopInstrs;
+ std::vector<std::vector<uint8_t>> nopFiller = *ctx.target->nopInstrs;
unsigned num = size / nopFiller.back().size();
for (unsigned c = 0; c < num; ++c) {
memcpy(buf + i, nopFiller.back().data(), nopFiller.back().size());
@@ -541,7 +541,7 @@ void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) {
else
end = buf + sections[i + 1]->outSecOff;
if (isec->nopFiller) {
- assert(target->nopInstrs);
+ assert(ctx.target->nopInstrs);
nopInstrFill(start, end - start);
} else
fill(start, end - start, filler);
@@ -857,7 +857,7 @@ std::array<uint8_t, 4> OutputSection::getFiller() {
if (filler)
return *filler;
if (flags & SHF_EXECINSTR)
- return target->trapInstr;
+ return ctx.target->trapInstr;
return {0, 0, 0, 0};
}
@@ -890,7 +890,7 @@ void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
int64_t writtenAddend =
relOsec->type == SHT_NOBITS
? 0
- : target->getImplicitAddend(relocTarget, rel.type);
+ : ctx.target->getImplicitAddend(relocTarget, rel.type);
if (addend != writtenAddend)
internalLinkerError(
getErrorLocation(relocTarget),
diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index fa94842f3636b3..e5f58f1a7dd129 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -399,7 +399,7 @@ template <class ELFT> static void addCopyRelSymbol(SharedSymbol &ss) {
for (SharedSymbol *sym : getSymbolsAt<ELFT>(ss))
replaceWithDefined(*sym, *sec, 0, sym->size);
- ctx.mainPart->relaDyn->addSymbolReloc(target->copyRel, *sec, 0, ss);
+ ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->copyRel, *sec, 0, ss);
}
// .eh_frame sections are mergeable input sections, so their input
@@ -511,7 +511,7 @@ int64_t RelocationScanner::computeMipsAddend(const RelTy &rel, RelExpr expr,
for (const RelTy *ri = &rel; ri != static_cast<const RelTy *>(end); ++ri)
if (ri->getType(config->isMips64EL) == pairTy &&
ri->getSymbol(config->isMips64EL) == symIndex)
- return target->getImplicitAddend(buf + ri->r_offset, pairTy);
+ return ctx.target->getImplicitAddend(buf + ri->r_offset, pairTy);
warn("can't find matching " + toString(pairTy) + " relocation for " +
toString(type));
@@ -876,8 +876,8 @@ static void addRelativeReloc(InputSectionBase &isec, uint64_t offsetInSec,
if (sym.isTagged()) {
std::lock_guard<std::mutex> lock(relocMutex);
- part.relaDyn->addRelativeReloc(target->relativeRel, isec, offsetInSec, sym,
- addend, type, expr);
+ part.relaDyn->addRelativeReloc(ctx.target->relativeRel, isec, offsetInSec,
+ sym, addend, type, expr);
// With MTE globals, we always want to derive the address tag by `ldg`-ing
// the symbol. When we have a RELATIVE relocation though, we no longer have
// a reference to the symbol. Because of this, when we have an addend that
@@ -906,8 +906,8 @@ static void addRelativeReloc(InputSectionBase &isec, uint64_t offsetInSec,
part.relrDyn->relocs.push_back({&isec, isec.relocs().size() - 1});
return;
}
- part.relaDyn->addRelativeReloc<shard>(target->relativeRel, isec, offsetInSec,
- sym, addend, type, expr);
+ part.relaDyn->addRelativeReloc<shard>(ctx.target->relativeRel, isec,
+ offsetInSec, sym, addend, type, expr);
}
template <class PltSection, class GotPltSection>
@@ -927,7 +927,7 @@ void elf::addGotEntry(Symbol &sym) {
// If preemptible, emit a GLOB_DAT relocation.
if (sym.isPreemptible) {
- ctx.mainPart->relaDyn->addReloc({target->gotRel, in.got.get(), off,
+ ctx.mainPart->relaDyn->addReloc({ctx.target->gotRel, in.got.get(), off,
DynamicReloc::AgainstSymbol, sym, 0,
R_ABS});
return;
@@ -936,20 +936,20 @@ void elf::addGotEntry(Symbol &sym) {
// Otherwise, the value is either a link-time constant or the load base
// plus a constant.
if (!config->isPic || isAbsolute(sym))
- in.got->addConstant({R_ABS, target->symbolicRel, off, 0, &sym});
+ in.got->addConstant({R_ABS, ctx.target->symbolicRel, off, 0, &sym});
else
- addRelativeReloc(*in.got, off, sym, 0, R_ABS, target->symbolicRel);
+ addRelativeReloc(*in.got, off, sym, 0, R_ABS, ctx.target->symbolicRel);
}
static void addTpOffsetGotEntry(Symbol &sym) {
in.got->addEntry(sym);
uint64_t off = sym.getGotOffset();
if (!sym.isPreemptible && !config->shared) {
- in.got->addConstant({R_TPREL, target->symbolicRel, off, 0, &sym});
+ in.got->addConstant({R_TPREL, ctx.target->symbolicRel, off, 0, &sym});
return;
}
ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
- target->tlsGotRel, *in.got, off, sym, target->symbolicRel);
+ ctx.target->tlsGotRel, *in.got, off, sym, ctx.target->symbolicRel);
}
// Return true if we can define a symbol in the executable that
@@ -997,7 +997,7 @@ bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
// These never do, except if the entire file is position dependent or if
// only the low bits are used.
if (e == R_GOT || e == R_PLT)
- return target->usesOnlyLowPageBits(type) || !config->isPic;
+ return ctx.target->usesOnlyLowPageBits(type) || !config->isPic;
// R_AARCH64_AUTH_ABS64 requires a dynamic relocation.
if (sym.isPreemptible || e == R_AARCH64_AUTH)
@@ -1018,7 +1018,7 @@ bool RelocationScanner::isStaticLinkTimeConstant(RelExpr e, RelType type,
if (!absVal && relE)
return true;
if (!absVal && !relE)
- return target->usesOnlyLowPageBits(type);
+ return ctx.target->usesOnlyLowPageBits(type);
assert(absVal && relE);
@@ -1072,8 +1072,8 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
type == R_HEX_GD_PLT_B32_PCREL_X)))
expr = fromPlt(expr);
} else if (!isAbsoluteValue(sym)) {
- expr =
- target->adjustGotPcExpr(type, addend, sec->content().data() + offset);
+ expr = ctx.target->adjustGotPcExpr(type, addend,
+ sec->content().data() + offset);
// If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
// needing the GOT if we can't relax everything.
if (expr == R_RELAX_GOT_PC)
@@ -1142,15 +1142,15 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
!(config->zText ||
(isa<EhInputSection>(sec) && config->emachine != EM_MIPS));
if (canWrite) {
- RelType rel = target->getDynRel(type);
+ RelType rel = ctx.target->getDynRel(type);
if (oneof<R_GOT, R_LOONGARCH_GOT>(expr) ||
- (rel == target->symbolicRel && !sym.isPreemptible)) {
+ (rel == ctx.target->symbolicRel && !sym.isPreemptible)) {
addRelativeReloc<true>(*sec, offset, sym, addend, expr, type);
return;
}
if (rel != 0) {
- if (config->emachine == EM_MIPS && rel == target->symbolicRel)
- rel = target->relativeRel;
+ if (config->emachine == EM_MIPS && rel == ctx.target->symbolicRel)
+ rel = ctx.target->relativeRel;
std::lock_guard<std::mutex> lock(relocMutex);
Partition &part = sec->getPartition();
if (config->emachine == EM_AARCH64 && type == R_AARCH64_AUTH_ABS64) {
@@ -1363,9 +1363,9 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym,
if (oneof<R_TLSLD_GOT, R_TLSLD_GOTPLT, R_TLSLD_PC, R_TLSLD_HINT>(expr)) {
// Local-Dynamic relocs can be optimized to Local-Exec.
if (execOptimize) {
- c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE), type,
+ c.addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE), type,
offset, addend, &sym});
- return target->getTlsGdRelaxSkip(type);
+ return ctx.target->getTlsGdRelaxSkip(type);
}
if (expr == R_TLSLD_HINT)
return 1;
@@ -1377,7 +1377,7 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym,
// Local-Dynamic relocs can be optimized to Local-Exec.
if (expr == R_DTPREL) {
if (execOptimize)
- expr = target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE);
+ expr = ctx.target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE);
c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
@@ -1408,13 +1408,13 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym,
// the categorization in RISCV::relocateAlloc.
if (sym.isPreemptible) {
sym.setFlags(NEEDS_TLSGD_TO_IE);
- c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE), type,
+ c.addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE), type,
offset, addend, &sym});
} else {
- c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE), type,
+ c.addReloc({ctx.target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE), type,
offset, addend, &sym});
}
- return target->getTlsGdRelaxSkip(type);
+ return ctx.target->getTlsGdRelaxSkip(type);
}
if (oneof<R_GOT, R_GOTPLT, R_GOT_PC, R_AARCH64_GOT_PAGE_PC,
@@ -1427,7 +1427,8 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym,
} else if (expr != R_TLSIE_HINT) {
sym.setFlags(NEEDS_TLSIE);
// R_GOT needs a relative relocation for PIC on i386 and Hexagon.
- if (expr == R_GOT && config->isPic && !target->usesOnlyLowPageBits(type))
+ if (expr == R_GOT && config->isPic &&
+ !ctx.target->usesOnlyLowPageBits(type))
addRelativeReloc<true>(c, offset, sym, addend, expr, type);
else
c.addReloc({expr, type, offset, addend, &sym});
@@ -1461,10 +1462,11 @@ void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) {
if (offset == uint64_t(-1))
return;
- RelExpr expr = target->getRelExpr(type, sym, sec->content().data() + offset);
+ RelExpr expr =
+ ctx.target->getRelExpr(type, sym, sec->content().data() + offset);
int64_t addend = RelTy::HasAddend
? getAddend<ELFT>(rel)
- : target->getImplicitAddend(
+ : ctx.target->getImplicitAddend(
sec->content().data() + rel.r_offset, type);
if (LLVM_UNLIKELY(config->emachine == EM_MIPS))
addend += computeMipsAddend<ELFT>(rel, expr, sym.isLocal());
@@ -1731,7 +1733,7 @@ static bool handleNonPreemptibleIfunc(Symbol &sym, uint16_t flags) {
directSym->allocateAux();
auto &dyn =
config->androidPackDynRelocs ? *in.relaPlt : *ctx.mainPart->relaDyn;
- addPltEntry(*in.iplt, *in.igotPlt, dyn, target->iRelativeRel, *directSym);
+ addPltEntry(*in.iplt, *in.igotPlt, dyn, ctx.target->iRelativeRel, *directSym);
sym.allocateAux();
ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx;
@@ -1739,7 +1741,7 @@ static bool handleNonPreemptibleIfunc(Symbol &sym, uint16_t flags) {
// Change the value to the IPLT and redirect all references to it.
auto &d = cast<Defined>(sym);
d.section = in.iplt.get();
- d.value = d.getPltIdx() * target->ipltEntrySize;
+ d.value = d.getPltIdx() * ctx.target->ipltEntrySize;
d.size = 0;
// It's important to set the symbol type here so that dynamic loaders
// don't try to call the PLT as if it were an ifunc resolver.
@@ -1770,7 +1772,7 @@ void elf::postScanRelocations() {
if (flags & NEEDS_GOT)
addGotEntry(sym);
if (flags & NEEDS_PLT)
- addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, target->pltRel, sym);
+ addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, ctx.target->pltRel, sym);
if (flags & NEEDS_COPY) {
if (sym.isObject()) {
invokeELFT(addCopyRelSymbol, cast<SharedSymbol>(sym));
@@ -1781,8 +1783,8 @@ void elf::postScanRelocations() {
assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT));
if (!sym.isDefined()) {
replaceWithDefined(sym, *in.plt,
- target->pltHeaderSize +
- target->pltEntrySize * sym.getPltIdx(),
+ ctx.target->pltHeaderSize +
+ ctx.target->pltEntrySize * sym.getPltIdx(),
0);
sym.setFlags(NEEDS_COPY);
if (config->emachine == EM_PPC) {
@@ -1803,37 +1805,37 @@ void elf::postScanRelocations() {
if (flags & NEEDS_TLSDESC) {
got->addTlsDescEntry(sym);
ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
- target->tlsDescRel, *got, got->getTlsDescOffset(sym), sym,
- target->tlsDescRel);
+ ctx.target->tlsDescRel, *got, got->getTlsDescOffset(sym), sym,
+ ctx.target->tlsDescRel);
}
if (flags & NEEDS_TLSGD) {
got->addDynTlsEntry(sym);
uint64_t off = got->getGlobalDynOffset(sym);
if (isLocalInExecutable)
// Write one to the GOT slot.
- got->addConstant({R_ADDEND, target->symbolicRel, off, 1, &sym});
+ got->addConstant({R_ADDEND, ctx.target->symbolicRel, off, 1, &sym});
else
- ctx.mainPart->relaDyn->addSymbolReloc(target->tlsModuleIndexRel, *got,
- off, sym);
+ ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsModuleIndexRel,
+ *got, off, sym);
// If the symbol is preemptible we need the dynamic linker to write
// the offset too.
uint64_t offsetOff = off + config->wordsize;
if (sym.isPreemptible)
- ctx.mainPart->relaDyn->addSymbolReloc(target->tlsOffsetRel, *got,
+ ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsOffsetRel, *got,
offsetOff, sym);
else
- got->addConstant({R_ABS, target->tlsOffsetRel, offsetOff, 0, &sym});
+ got->addConstant({R_ABS, ctx.target->tlsOffsetRel, offsetOff, 0, &sym});
}
if (flags & NEEDS_TLSGD_TO_IE) {
got->addEntry(sym);
- ctx.mainPart->relaDyn->addSymbolReloc(target->tlsGotRel, *got,
+ ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsGotRel, *got,
sym.getGotOffset(), sym);
}
if (flags & NEEDS_GOT_DTPREL) {
got->addEntry(sym);
got->addConstant(
- {R_ABS, target->tlsOffsetRel, sym.getGotOffset(), 0, &sym});
+ {R_ABS, ctx.target->tlsOffsetRel, sym.getGotOffset(), 0, &sym});
}
if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE))
@@ -1845,10 +1847,10 @@ void elf::postScanRelocations() {
static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0);
if (config->shared)
ctx.mainPart->relaDyn->addReloc(
- {target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
+ {ctx.target->tlsModuleIndexRel, got, got->getTlsIndexOff()});
else
- got->addConstant(
- {R_ADDEND, target->symbolicRel, got->getTlsIndexOff(), 1, &dummy});
+ got->addConstant({R_ADDEND, ctx.target->symbolicRel,
+ got->getTlsIndexOff(), 1, &dummy});
}
assert(ctx.symAux.size() == 1);
@@ -2054,8 +2056,8 @@ ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os,
ThunkSection *ts = tp.first;
uint64_t tsBase = os->addr + ts->outSecOff - pcBias;
uint64_t tsLimit = tsBase + ts->getSize();
- if (target->inBranchRange(rel.type, src,
- (src > tsLimit) ? tsBase : tsLimit))
+ if (ctx.target->inBranchRange(rel.type, src,
+ (src > tsLimit) ? tsBase : tsLimit))
return ts;
}
@@ -2065,11 +2067,11 @@ ThunkSection *ThunkCreator::getISDThunkSec(OutputSection *os,
// possible. Error if InputSection is so large we cannot place ThunkSection
// anywhere in Range.
uint64_t thunkSecOff = isec->outSecOff;
- if (!target->inBranchRange(rel.type, src,
- os->addr + thunkSecOff + rel.addend)) {
+ if (!ctx.target->inBranchRange(rel.type, src,
+ os->addr + thunkSecOff + rel.addend)) {
thunkSecOff = isec->outSecOff + isec->getSize();
- if (!target->inBranchRange(rel.type, src,
- os->addr + thunkSecOff + rel.addend))
+ if (!ctx.target->inBranchRange(rel.type, src,
+ os->addr + thunkSecOff + rel.addend))
fatal("InputSection too large for range extension thunk " +
isec->getObjMsg(src - (os->addr + isec->outSecOff)));
}
@@ -2123,8 +2125,7 @@ ThunkSection *ThunkCreator::getISThunkSec(InputSection *isec) {
// allow for the creation of a short thunk.
void ThunkCreator::createInitialThunkSections(
ArrayRef<OutputSection *> outputSections) {
- uint32_t thunkSectionSpacing = target->getThunkSectionSpacing();
-
+ uint32_t thunkSectionSpacing = ctx.target->getThunkSectionSpacing();
forEachInputSectionDescription(
outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
if (isd->sections.empty())
@@ -2188,7 +2189,7 @@ ThunkSection *ThunkCreator::addThunkSection(OutputSection *os,
uint64_t isdSize = isd->sections.back()->outSecOff +
isd->sections.back()->getSize() -
isd->sections.front()->outSecOff;
- if (os->size > target->getThunkSectionSpacing() && isdSize > 4096)
+ if (os->size > ctx.target->getThunkSectionSpacing() && isdSize > 4096)
ts->roundUpSizeForErrata = true;
}
isd->thunkSections.push_back({ts, pass});
@@ -2230,8 +2231,8 @@ std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
for (Thunk *t : *thunkVec)
if (isThunkSectionCompatible(isec, t->getThunkTargetSym()->section) &&
t->isCompatibleWith(*isec, rel) &&
- target->inBranchRange(rel.type, src,
- t->getThunkTargetSym()->getVA(-pcBias)))
+ ctx.target->inBranchRange(rel.type, src,
+ t->getThunkTargetSym()->getVA(-pcBias)))
return std::make_pair(t, false);
// No existing compatible Thunk in range, create a new one
@@ -2246,7 +2247,7 @@ std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
// relocation back to its original non-Thunk target.
bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
if (Thunk *t = thunks.lookup(rel.sym)) {
- if (target->inBranchRange(rel.type, src, rel.sym->getVA(rel.addend)))
+ if (ctx.target->inBranchRange(rel.type, src, rel.sym->getVA(rel.addend)))
return true;
rel.sym = &t->destination;
rel.addend = t->addend;
@@ -2286,7 +2287,7 @@ bool ThunkCreator::createThunks(uint32_t pass,
this->pass = pass;
bool addressesChanged = false;
- if (pass == 0 && target->getThunkSectionSpacing())
+ if (pass == 0 && ctx.target->getThunkSectionSpacing())
createInitialThunkSections(outputSections);
// Create all the Thunks and insert them into synthetic ThunkSections. The
@@ -2306,8 +2307,8 @@ bool ThunkCreator::createThunks(uint32_t pass,
if (pass > 0 && normalizeExistingThunk(rel, src))
continue;
- if (!target->needsThunk(rel.expr, rel.type, isec->file, src,
- *rel.sym, rel.addend))
+ if (!ctx.target->needsThunk(rel.expr, rel.type, isec->file, src,
+ *rel.sym, rel.addend))
continue;
Thunk *t;
@@ -2378,8 +2379,8 @@ void elf::hexagonTLSSymbolUpdate(ArrayRef<OutputSection *> outputSections) {
if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
if (needEntry) {
sym->allocateAux();
- addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, target->pltRel,
- *sym);
+ addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt,
+ ctx.target->pltRel, *sym);
needEntry = false;
}
rel.sym = sym;
diff --git a/lld/ELF/ScriptParser.cpp b/lld/ELF/ScriptParser.cpp
index 08773bfb6ffe07..819036a1ab1820 100644
--- a/lld/ELF/ScriptParser.cpp
+++ b/lld/ELF/ScriptParser.cpp
@@ -1326,7 +1326,7 @@ Expr ScriptParser::readExpr1(Expr lhs, int minPrec) {
Expr ScriptParser::getPageSize() {
std::string location = getCurrentLocation();
return [=]() -> uint64_t {
- if (target)
+ if (ctx.target)
return config->commonPageSize;
error(location + ": unable to calculate page size");
return 4096; // Return a dummy value.
diff --git a/lld/ELF/Symbols.cpp b/lld/ELF/Symbols.cpp
index 13fc6dc0dd572a..b08c679ab36850 100644
--- a/lld/ELF/Symbols.cpp
+++ b/lld/ELF/Symbols.cpp
@@ -152,7 +152,7 @@ uint64_t Symbol::getGotVA() const {
}
uint64_t Symbol::getGotOffset() const {
- return getGotIdx() * target->gotEntrySize;
+ return getGotIdx() * ctx.target->gotEntrySize;
}
uint64_t Symbol::getGotPltVA() const {
@@ -163,15 +163,16 @@ uint64_t Symbol::getGotPltVA() const {
uint64_t Symbol::getGotPltOffset() const {
if (isInIplt)
- return getPltIdx() * target->gotEntrySize;
- return (getPltIdx() + target->gotPltHeaderEntriesNum) * target->gotEntrySize;
+ return getPltIdx() * ctx.target->gotEntrySize;
+ return (getPltIdx() + ctx.target->gotPltHeaderEntriesNum) *
+ ctx.target->gotEntrySize;
}
uint64_t Symbol::getPltVA() const {
- uint64_t outVA = isInIplt
- ? in.iplt->getVA() + getPltIdx() * target->ipltEntrySize
- : in.plt->getVA() + in.plt->headerSize +
- getPltIdx() * target->pltEntrySize;
+ uint64_t outVA =
+ isInIplt ? in.iplt->getVA() + getPltIdx() * ctx.target->ipltEntrySize
+ : in.plt->getVA() + in.plt->headerSize +
+ getPltIdx() * ctx.target->pltEntrySize;
// While linking microMIPS code PLT code are always microMIPS
// code. Set the less-significant bit to track that fact.
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index 4c2b6db08b99a2..df82e9ed0652ec 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -641,7 +641,7 @@ void EhFrameSection::writeTo(uint8_t *buf) {
// in the output buffer, but relocateAlloc() still works because
// getOffset() takes care of discontiguous section pieces.
for (EhInputSection *s : sections)
- target->relocateAlloc(*s, buf);
+ ctx.target->relocateAlloc(*s, buf);
if (getPartition().ehFrameHdr && getPartition().ehFrameHdr->getParent())
getPartition().ehFrameHdr->write();
@@ -649,8 +649,8 @@ void EhFrameSection::writeTo(uint8_t *buf) {
GotSection::GotSection()
: SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
- target->gotEntrySize, ".got") {
- numEntries = target->gotHeaderEntriesNum;
+ ctx.target->gotEntrySize, ".got") {
+ numEntries = ctx.target->gotHeaderEntriesNum;
}
void GotSection::addConstant(const Relocation &r) { relocations.push_back(r); }
@@ -702,7 +702,8 @@ uint64_t GotSection::getGlobalDynOffset(const Symbol &b) const {
void GotSection::finalizeContents() {
if (config->emachine == EM_PPC64 &&
- numEntries <= target->gotHeaderEntriesNum && !ctx.sym.globalOffsetTable)
+ numEntries <= ctx.target->gotHeaderEntriesNum &&
+ !ctx.sym.globalOffsetTable)
size = 0;
else
size = numEntries * config->wordsize;
@@ -711,15 +712,15 @@ void GotSection::finalizeContents() {
bool GotSection::isNeeded() const {
// Needed if the GOT symbol is used or the number of entries is more than just
// the header. A GOT with just the header may not be needed.
- return hasGotOffRel || numEntries > target->gotHeaderEntriesNum;
+ return hasGotOffRel || numEntries > ctx.target->gotHeaderEntriesNum;
}
void GotSection::writeTo(uint8_t *buf) {
// On PPC64 .got may be needed but empty. Skip the write.
if (size == 0)
return;
- target->writeGotHeader(buf);
- target->relocateAlloc(*this, buf);
+ ctx.target->writeGotHeader(buf);
+ ctx.target->relocateAlloc(*this, buf);
}
static uint64_t getMipsPageAddr(uint64_t addr) {
@@ -1018,7 +1019,7 @@ void MipsGotSection::build() {
// be allocated before us in the static TLS block.
if (s->isPreemptible || config->shared)
ctx.mainPart->relaDyn->addReloc(
- {target->tlsGotRel, this, offset,
+ {ctx.target->tlsGotRel, this, offset,
DynamicReloc::AgainstSymbolWithTargetVA, *s, 0, R_ABS});
}
for (std::pair<Symbol *, size_t> &p : got.dynTlsSymbols) {
@@ -1028,7 +1029,7 @@ void MipsGotSection::build() {
if (!config->shared)
continue;
ctx.mainPart->relaDyn->addReloc(
- {target->tlsModuleIndexRel, this, offset});
+ {ctx.target->tlsModuleIndexRel, this, offset});
} else {
// When building a shared library we still need a dynamic relocation
// for the module index. Therefore only checking for
@@ -1036,14 +1037,14 @@ void MipsGotSection::build() {
// thread-locals that have been marked as local through a linker script)
if (!s->isPreemptible && !config->shared)
continue;
- ctx.mainPart->relaDyn->addSymbolReloc(target->tlsModuleIndexRel, *this,
- offset, *s);
+ ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsModuleIndexRel,
+ *this, offset, *s);
// However, we can skip writing the TLS offset reloc for non-preemptible
// symbols since it is known even in shared libraries
if (!s->isPreemptible)
continue;
offset += config->wordsize;
- ctx.mainPart->relaDyn->addSymbolReloc(target->tlsOffsetRel, *this,
+ ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsOffsetRel, *this,
offset, *s);
}
}
@@ -1056,8 +1057,8 @@ void MipsGotSection::build() {
// Dynamic relocations for "global" entries.
for (const std::pair<Symbol *, size_t> &p : got.global) {
uint64_t offset = p.second * config->wordsize;
- ctx.mainPart->relaDyn->addSymbolReloc(target->relativeRel, *this, offset,
- *p.first);
+ ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->relativeRel, *this,
+ offset, *p.first);
}
if (!config->isPic)
continue;
@@ -1067,13 +1068,13 @@ void MipsGotSection::build() {
size_t pageCount = l.second.count;
for (size_t pi = 0; pi < pageCount; ++pi) {
uint64_t offset = (l.second.firstIndex + pi) * config->wordsize;
- ctx.mainPart->relaDyn->addReloc({target->relativeRel, this, offset,
+ ctx.mainPart->relaDyn->addReloc({ctx.target->relativeRel, this, offset,
l.first, int64_t(pi * 0x10000)});
}
}
for (const std::pair<GotEntry, size_t> &p : got.local16) {
uint64_t offset = p.second * config->wordsize;
- ctx.mainPart->relaDyn->addReloc({target->relativeRel, this, offset,
+ ctx.mainPart->relaDyn->addReloc({ctx.target->relativeRel, this, offset,
DynamicReloc::AddendOnlyWithTargetVA,
*p.first.first, p.first.second, R_ABS});
}
@@ -1180,16 +1181,16 @@ void GotPltSection::addEntry(Symbol &sym) {
}
size_t GotPltSection::getSize() const {
- return (target->gotPltHeaderEntriesNum + entries.size()) *
- target->gotEntrySize;
+ return (ctx.target->gotPltHeaderEntriesNum + entries.size()) *
+ ctx.target->gotEntrySize;
}
void GotPltSection::writeTo(uint8_t *buf) {
- target->writeGotPltHeader(buf);
- buf += target->gotPltHeaderEntriesNum * target->gotEntrySize;
+ ctx.target->writeGotPltHeader(buf);
+ buf += ctx.target->gotPltHeaderEntriesNum * ctx.target->gotEntrySize;
for (const Symbol *b : entries) {
- target->writeGotPlt(buf, *b);
- buf += target->gotEntrySize;
+ ctx.target->writeGotPlt(buf, *b);
+ buf += ctx.target->gotEntrySize;
}
}
@@ -1217,7 +1218,7 @@ static StringRef getIgotPltName() {
IgotPltSection::IgotPltSection()
: SyntheticSection(SHF_ALLOC | SHF_WRITE,
config->emachine == EM_PPC64 ? SHT_NOBITS : SHT_PROGBITS,
- target->gotEntrySize, getIgotPltName()) {}
+ ctx.target->gotEntrySize, getIgotPltName()) {}
void IgotPltSection::addEntry(Symbol &sym) {
assert(ctx.symAux.back().pltIdx == entries.size());
@@ -1225,13 +1226,13 @@ void IgotPltSection::addEntry(Symbol &sym) {
}
size_t IgotPltSection::getSize() const {
- return entries.size() * target->gotEntrySize;
+ return entries.size() * ctx.target->gotEntrySize;
}
void IgotPltSection::writeTo(uint8_t *buf) {
for (const Symbol *b : entries) {
- target->writeIgotPlt(buf, *b);
- buf += target->gotEntrySize;
+ ctx.target->writeIgotPlt(buf, *b);
+ buf += ctx.target->gotEntrySize;
}
}
@@ -1444,15 +1445,15 @@ DynamicSection<ELFT>::computeContents() {
break;
case EM_AARCH64:
if (llvm::find_if(in.relaPlt->relocs, [](const DynamicReloc &r) {
- return r.type == target->pltRel &&
- r.sym->stOther & STO_AARCH64_VARIANT_PCS;
+ return r.type == ctx.target->pltRel &&
+ r.sym->stOther & STO_AARCH64_VARIANT_PCS;
}) != in.relaPlt->relocs.end())
addInt(DT_AARCH64_VARIANT_PCS, 0);
addInSec(DT_PLTGOT, *in.gotPlt);
break;
case EM_RISCV:
if (llvm::any_of(in.relaPlt->relocs, [](const DynamicReloc &r) {
- return r.type == target->pltRel &&
+ return r.type == ctx.target->pltRel &&
(r.sym->stOther & STO_RISCV_VARIANT_CC);
}))
addInt(DT_RISCV_VARIANT_CC, 0);
@@ -1534,7 +1535,7 @@ DynamicSection<ELFT>::computeContents() {
if (config->emachine == EM_MIPS) {
addInt(DT_MIPS_RLD_VERSION, 1);
addInt(DT_MIPS_FLAGS, RHF_NOTPOT);
- addInt(DT_MIPS_BASE_ADDRESS, target->getImageBase());
+ addInt(DT_MIPS_BASE_ADDRESS, ctx.target->getImageBase());
addInt(DT_MIPS_SYMTABNO, part.dynSymTab->getNumSymbols());
addInt(DT_MIPS_LOCAL_GOTNO, in.mipsGot->getLocalEntriesNum());
@@ -1562,7 +1563,7 @@ DynamicSection<ELFT>::computeContents() {
if (config->emachine == EM_PPC64 && in.plt->isNeeded()) {
// The Glink tag points to 32 bytes before the first lazy symbol resolution
// stub, which starts directly after the header.
- addInt(DT_PPC64_GLINK, in.plt->getVA() + target->pltHeaderSize - 32);
+ addInt(DT_PPC64_GLINK, in.plt->getVA() + ctx.target->pltHeaderSize - 32);
}
if (config->emachine == EM_PPC64)
@@ -1618,7 +1619,8 @@ uint32_t DynamicReloc::getSymIndex(SymbolTableBaseSection *symTab) const {
return 0;
size_t index = symTab->getSymbolIndex(*sym);
- assert((index != 0 || (type != target->gotRel && type != target->pltRel) ||
+ assert((index != 0 ||
+ (type != ctx.target->gotRel && type != ctx.target->pltRel) ||
!ctx.mainPart->dynSymTab->getParent()) &&
"GOT or PLT relocation must refer to symbol in dynamic symbol table");
return index;
@@ -1637,7 +1639,7 @@ void RelocationBaseSection::addSymbolReloc(
RelType dynType, InputSectionBase &isec, uint64_t offsetInSec, Symbol &sym,
int64_t addend, std::optional<RelType> addendRelType) {
addReloc(DynamicReloc::AgainstSymbol, dynType, isec, offsetInSec, sym, addend,
- R_ADDEND, addendRelType ? *addendRelType : target->noneRel);
+ R_ADDEND, addendRelType ? *addendRelType : ctx.target->noneRel);
}
void RelocationBaseSection::addAddendOnlyRelocIfNonPreemptible(
@@ -1665,7 +1667,7 @@ void RelocationBaseSection::mergeRels() {
void RelocationBaseSection::partitionRels() {
if (!combreloc)
return;
- const RelType relativeRel = target->relativeRel;
+ const RelType relativeRel = ctx.target->relativeRel;
numRelativeRelocs =
std::stable_partition(relocs.begin(), relocs.end(),
[=](auto &r) { return r.type == relativeRel; }) -
@@ -1703,7 +1705,7 @@ void RelocationBaseSection::computeRels() {
auto irelative = std::stable_partition(
relocs.begin() + numRelativeRelocs, relocs.end(),
- [t = target->iRelativeRel](auto &r) { return r.type != t; });
+ [t = ctx.target->iRelativeRel](auto &r) { return r.type != t; });
// Sort by (!IsRelative,SymIndex,r_offset). DT_REL[A]COUNT requires us to
// place R_*_RELATIVE first. SymIndex is to improve locality, while r_offset
@@ -1839,7 +1841,7 @@ bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
rel.type, false);
r.r_addend = config->isRela ? rel.computeAddend() : 0;
- if (r.getType(config->isMips64EL) == target->relativeRel)
+ if (r.getType(config->isMips64EL) == ctx.target->relativeRel)
relatives.push_back(r);
else
nonRelatives.push_back(r);
@@ -1937,7 +1939,7 @@ bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG |
RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela);
add(g[0].r_offset - offset);
- add(target->relativeRel);
+ add(ctx.target->relativeRel);
if (config->isRela) {
add(g[0].r_addend - addend);
addend = g[0].r_addend;
@@ -1948,7 +1950,7 @@ bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG |
RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela);
add(config->wordsize);
- add(target->relativeRel);
+ add(ctx.target->relativeRel);
if (config->isRela) {
for (const auto &i : llvm::drop_begin(g)) {
add(i.r_addend - addend);
@@ -1963,7 +1965,7 @@ bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
if (!ungroupedRelatives.empty()) {
add(ungroupedRelatives.size());
add(RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela);
- add(target->relativeRel);
+ add(ctx.target->relativeRel);
for (Elf_Rela &r : ungroupedRelatives) {
add(r.r_offset - offset);
offset = r.r_offset;
@@ -2538,7 +2540,7 @@ void HashTableSection::writeTo(uint8_t *buf) {
PltSection::PltSection()
: SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16, ".plt"),
- headerSize(target->pltHeaderSize) {
+ headerSize(ctx.target->pltHeaderSize) {
// On PowerPC, this section contains lazy symbol resolvers.
if (config->emachine == EM_PPC64) {
name = ".glink";
@@ -2560,12 +2562,12 @@ PltSection::PltSection()
void PltSection::writeTo(uint8_t *buf) {
// At beginning of PLT, we have code to call the dynamic
// linker to resolve dynsyms at runtime. Write such code.
- target->writePltHeader(buf);
+ ctx.target->writePltHeader(buf);
size_t off = headerSize;
for (const Symbol *sym : entries) {
- target->writePlt(buf + off, *sym, getVA() + off);
- off += target->pltEntrySize;
+ ctx.target->writePlt(buf + off, *sym, getVA() + off);
+ off += ctx.target->pltEntrySize;
}
}
@@ -2576,7 +2578,7 @@ void PltSection::addEntry(Symbol &sym) {
}
size_t PltSection::getSize() const {
- return headerSize + entries.size() * target->pltEntrySize;
+ return headerSize + entries.size() * ctx.target->pltEntrySize;
}
bool PltSection::isNeeded() const {
@@ -2587,12 +2589,12 @@ bool PltSection::isNeeded() const {
// Used by ARM to add mapping symbols in the PLT section, which aid
// disassembly.
void PltSection::addSymbols() {
- target->addPltHeaderSymbols(*this);
+ ctx.target->addPltHeaderSymbols(*this);
size_t off = headerSize;
for (size_t i = 0; i < entries.size(); ++i) {
- target->addPltSymbols(*this, off);
- off += target->pltEntrySize;
+ ctx.target->addPltSymbols(*this, off);
+ off += ctx.target->pltEntrySize;
}
}
@@ -2607,13 +2609,13 @@ IpltSection::IpltSection()
void IpltSection::writeTo(uint8_t *buf) {
uint32_t off = 0;
for (const Symbol *sym : entries) {
- target->writeIplt(buf + off, *sym, getVA() + off);
- off += target->ipltEntrySize;
+ ctx.target->writeIplt(buf + off, *sym, getVA() + off);
+ off += ctx.target->ipltEntrySize;
}
}
size_t IpltSection::getSize() const {
- return entries.size() * target->ipltEntrySize;
+ return entries.size() * ctx.target->ipltEntrySize;
}
void IpltSection::addEntry(Symbol &sym) {
@@ -2626,8 +2628,8 @@ void IpltSection::addEntry(Symbol &sym) {
void IpltSection::addSymbols() {
size_t off = 0;
for (size_t i = 0, e = entries.size(); i != e; ++i) {
- target->addPltSymbols(*this, off);
- off += target->pltEntrySize;
+ ctx.target->addPltSymbols(*this, off);
+ off += ctx.target->pltEntrySize;
}
}
@@ -2641,7 +2643,7 @@ void PPC32GlinkSection::writeTo(uint8_t *buf) {
}
size_t PPC32GlinkSection::getSize() const {
- return headerSize + entries.size() * target->pltEntrySize + footerSize;
+ return headerSize + entries.size() * ctx.target->pltEntrySize + footerSize;
}
// This is an x86-only extra PLT section and used only when a security
@@ -2706,12 +2708,12 @@ IBTPltSection::IBTPltSection()
: SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16, ".plt") {}
void IBTPltSection::writeTo(uint8_t *buf) {
- target->writeIBTPlt(buf, in.plt->getNumEntries());
+ ctx.target->writeIBTPlt(buf, in.plt->getNumEntries());
}
size_t IBTPltSection::getSize() const {
// 16 is the header size of .plt.
- return 16 + in.plt->getNumEntries() * target->pltEntrySize;
+ return 16 + in.plt->getNumEntries() * ctx.target->pltEntrySize;
}
bool IBTPltSection::isNeeded() const { return in.plt->getNumEntries() > 0; }
@@ -4185,7 +4187,7 @@ void ARMExidxSyntheticSection::writeTo(uint8_t *buf) {
// Recalculate outSecOff as finalizeAddressDependentContent()
// may have altered syntheticSection outSecOff.
d->outSecOff = offset + outSecOff;
- target->relocateAlloc(*d, buf + offset);
+ ctx.target->relocateAlloc(*d, buf + offset);
offset += d->getSize();
} else {
// A Linker generated CANTUNWIND section.
@@ -4193,7 +4195,7 @@ void ARMExidxSyntheticSection::writeTo(uint8_t *buf) {
write32(buf + offset + 4, 0x1);
uint64_t s = isec->getVA();
uint64_t p = getVA() + offset;
- target->relocateNoSym(buf + offset, R_ARM_PREL31, s - p);
+ ctx.target->relocateNoSym(buf + offset, R_ARM_PREL31, s - p);
offset += 8;
}
}
@@ -4202,7 +4204,7 @@ void ARMExidxSyntheticSection::writeTo(uint8_t *buf) {
write32(buf + offset + 4, 0x1);
uint64_t s = sentinel->getVA(sentinel->getSize());
uint64_t p = getVA() + offset;
- target->relocateNoSym(buf + offset, R_ARM_PREL31, s - p);
+ ctx.target->relocateNoSym(buf + offset, R_ARM_PREL31, s - p);
assert(size == offset + 8);
}
@@ -4866,7 +4868,7 @@ template <class ELFT> void elf::createSyntheticSections() {
// _GLOBAL_OFFSET_TABLE_ is defined relative to either .got.plt or .got. Treat
// it as a relocation and ensure the referenced section is created.
if (ctx.sym.globalOffsetTable && config->emachine != EM_MIPS) {
- if (target->gotBaseSymInGotPlt)
+ if (ctx.target->gotBaseSymInGotPlt)
in.gotPlt->hasGotPltOffRel = true;
else
in.got->hasGotOffRel = true;
diff --git a/lld/ELF/Target.cpp b/lld/ELF/Target.cpp
index 584e9270469d00..a1f2229ad131fe 100644
--- a/lld/ELF/Target.cpp
+++ b/lld/ELF/Target.cpp
@@ -38,8 +38,6 @@ using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
-const TargetInfo *elf::target;
-
std::string lld::toString(RelType type) {
StringRef s = getELFRelocationTypeName(elf::config->emachine, type);
if (s == "Unknown")
diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h
index 0cefa318135662..9894fb32c503c3 100644
--- a/lld/ELF/Target.h
+++ b/lld/ELF/Target.h
@@ -241,7 +241,6 @@ void convertArmInstructionstoBE8(InputSection *sec, uint8_t *buf);
void createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files);
void initSymbolAnchors();
-LLVM_LIBRARY_VISIBILITY extern const TargetInfo *target;
TargetInfo *getTarget();
template <class ELFT> bool isMipsPIC(const Defined *sym);
diff --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp
index 478d956f43d9b1..fe83c086d84322 100644
--- a/lld/ELF/Thunks.cpp
+++ b/lld/ELF/Thunks.cpp
@@ -475,9 +475,10 @@ class PPC64PILongBranchThunk final : public PPC64LongBranchThunk {
if (std::optional<uint32_t> index =
in.ppc64LongBranchTarget->addEntry(&dest, addend)) {
ctx.mainPart->relaDyn->addRelativeReloc(
- target->relativeRel, *in.ppc64LongBranchTarget, *index * UINT64_C(8),
- dest, addend + getPPC64GlobalEntryToLocalEntryOffset(dest.stOther),
- target->symbolicRel, R_ABS);
+ ctx.target->relativeRel, *in.ppc64LongBranchTarget,
+ *index * UINT64_C(8), dest,
+ addend + getPPC64GlobalEntryToLocalEntryOffset(dest.stOther),
+ ctx.target->symbolicRel, R_ABS);
}
}
};
@@ -528,7 +529,7 @@ void AArch64Thunk::writeTo(uint8_t *buf) {
uint64_t s = getAArch64ThunkDestVA(destination, addend);
uint64_t p = getThunkTargetSym()->getVA();
write32(buf, 0x14000000); // b S
- target->relocateNoSym(buf, R_AARCH64_CALL26, s - p);
+ ctx.target->relocateNoSym(buf, R_AARCH64_CALL26, s - p);
}
// AArch64 long range Thunks.
@@ -541,7 +542,7 @@ void AArch64ABSLongThunk::writeLong(uint8_t *buf) {
};
uint64_t s = getAArch64ThunkDestVA(destination, addend);
memcpy(buf, data, sizeof(data));
- target->relocateNoSym(buf + 8, R_AARCH64_ABS64, s);
+ ctx.target->relocateNoSym(buf + 8, R_AARCH64_ABS64, s);
}
void AArch64ABSLongThunk::addSymbols(ThunkSection &isec) {
@@ -566,9 +567,9 @@ void AArch64ADRPThunk::writeLong(uint8_t *buf) {
uint64_t s = getAArch64ThunkDestVA(destination, addend);
uint64_t p = getThunkTargetSym()->getVA();
memcpy(buf, data, sizeof(data));
- target->relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
- getAArch64Page(s) - getAArch64Page(p));
- target->relocateNoSym(buf + 4, R_AARCH64_ADD_ABS_LO12_NC, s);
+ ctx.target->relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
+ getAArch64Page(s) - getAArch64Page(p));
+ ctx.target->relocateNoSym(buf + 4, R_AARCH64_ADD_ABS_LO12_NC, s);
}
void AArch64ADRPThunk::addSymbols(ThunkSection &isec) {
@@ -609,7 +610,7 @@ void ARMThunk::writeTo(uint8_t *buf) {
uint64_t p = getThunkTargetSym()->getVA();
int64_t offset = s - p - 8;
write32(buf, 0xea000000); // b S
- target->relocateNoSym(buf, R_ARM_JUMP24, offset);
+ ctx.target->relocateNoSym(buf, R_ARM_JUMP24, offset);
}
bool ARMThunk::isCompatibleWith(const InputSection &isec,
@@ -653,7 +654,7 @@ void ThumbThunk::writeTo(uint8_t *buf) {
int64_t offset = s - p - 4;
write16(buf + 0, 0xf000); // b.w S
write16(buf + 2, 0xb000);
- target->relocateNoSym(buf, R_ARM_THM_JUMP24, offset);
+ ctx.target->relocateNoSym(buf, R_ARM_THM_JUMP24, offset);
}
bool ThumbThunk::isCompatibleWith(const InputSection &isec,
@@ -671,8 +672,8 @@ void ARMV7ABSLongThunk::writeLong(uint8_t *buf) {
write32(buf + 4, 0xe340c000); // movt ip,:upper16:S
write32(buf + 8, 0xe12fff1c); // bx ip
uint64_t s = getARMThunkDestVA(destination);
- target->relocateNoSym(buf, R_ARM_MOVW_ABS_NC, s);
- target->relocateNoSym(buf + 4, R_ARM_MOVT_ABS, s);
+ ctx.target->relocateNoSym(buf, R_ARM_MOVW_ABS_NC, s);
+ ctx.target->relocateNoSym(buf + 4, R_ARM_MOVT_ABS, s);
}
void ARMV7ABSLongThunk::addSymbols(ThunkSection &isec) {
@@ -688,8 +689,8 @@ void ThumbV7ABSLongThunk::writeLong(uint8_t *buf) {
write16(buf + 6, 0x0c00);
write16(buf + 8, 0x4760); // bx ip
uint64_t s = getARMThunkDestVA(destination);
- target->relocateNoSym(buf, R_ARM_THM_MOVW_ABS_NC, s);
- target->relocateNoSym(buf + 4, R_ARM_THM_MOVT_ABS, s);
+ ctx.target->relocateNoSym(buf, R_ARM_THM_MOVW_ABS_NC, s);
+ ctx.target->relocateNoSym(buf + 4, R_ARM_THM_MOVT_ABS, s);
}
void ThumbV7ABSLongThunk::addSymbols(ThunkSection &isec) {
@@ -706,8 +707,8 @@ void ARMV7PILongThunk::writeLong(uint8_t *buf) {
uint64_t s = getARMThunkDestVA(destination);
uint64_t p = getThunkTargetSym()->getVA();
int64_t offset = s - p - 16;
- target->relocateNoSym(buf, R_ARM_MOVW_PREL_NC, offset);
- target->relocateNoSym(buf + 4, R_ARM_MOVT_PREL, offset);
+ ctx.target->relocateNoSym(buf, R_ARM_MOVW_PREL_NC, offset);
+ ctx.target->relocateNoSym(buf + 4, R_ARM_MOVT_PREL, offset);
}
void ARMV7PILongThunk::addSymbols(ThunkSection &isec) {
@@ -726,8 +727,8 @@ void ThumbV7PILongThunk::writeLong(uint8_t *buf) {
uint64_t s = getARMThunkDestVA(destination);
uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
int64_t offset = s - p - 12;
- target->relocateNoSym(buf, R_ARM_THM_MOVW_PREL_NC, offset);
- target->relocateNoSym(buf + 4, R_ARM_THM_MOVT_PREL, offset);
+ ctx.target->relocateNoSym(buf, R_ARM_THM_MOVW_PREL_NC, offset);
+ ctx.target->relocateNoSym(buf + 4, R_ARM_THM_MOVT_PREL, offset);
}
void ThumbV7PILongThunk::addSymbols(ThunkSection &isec) {
@@ -747,7 +748,7 @@ void ThumbV6MABSLongThunk::writeLong(uint8_t *buf) {
write16(buf + 6, 0xbd01); // pop {r0, pc} ; restore r0 and branch to dest
write32(buf + 8, 0x00000000); // L1: .word S
uint64_t s = getARMThunkDestVA(destination);
- target->relocateNoSym(buf + 8, R_ARM_ABS32, s);
+ ctx.target->relocateNoSym(buf + 8, R_ARM_ABS32, s);
}
void ThumbV6MABSLongThunk::addSymbols(ThunkSection &isec) {
@@ -774,10 +775,10 @@ void ThumbV6MABSXOLongThunk::writeLong(uint8_t *buf) {
write16(buf + 16, 0x9001); // str r0, [sp, #4] ; SP + 4 = S
write16(buf + 18, 0xbd01); // pop {r0, pc} ; restore r0 and branch to dest
uint64_t s = getARMThunkDestVA(destination);
- target->relocateNoSym(buf + 2, R_ARM_THM_ALU_ABS_G3, s);
- target->relocateNoSym(buf + 6, R_ARM_THM_ALU_ABS_G2_NC, s);
- target->relocateNoSym(buf + 10, R_ARM_THM_ALU_ABS_G1_NC, s);
- target->relocateNoSym(buf + 14, R_ARM_THM_ALU_ABS_G0_NC, s);
+ ctx.target->relocateNoSym(buf + 2, R_ARM_THM_ALU_ABS_G3, s);
+ ctx.target->relocateNoSym(buf + 6, R_ARM_THM_ALU_ABS_G2_NC, s);
+ ctx.target->relocateNoSym(buf + 10, R_ARM_THM_ALU_ABS_G1_NC, s);
+ ctx.target->relocateNoSym(buf + 14, R_ARM_THM_ALU_ABS_G0_NC, s);
}
void ThumbV6MABSXOLongThunk::addSymbols(ThunkSection &isec) {
@@ -799,7 +800,7 @@ void ThumbV6MPILongThunk::writeLong(uint8_t *buf) {
write32(buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 4)
uint64_t s = getARMThunkDestVA(destination);
uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
- target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12);
+ ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12);
}
void ThumbV6MPILongThunk::addSymbols(ThunkSection &isec) {
@@ -813,7 +814,8 @@ void ThumbV6MPILongThunk::addSymbols(ThunkSection &isec) {
void ARMV5LongLdrPcThunk::writeLong(uint8_t *buf) {
write32(buf + 0, 0xe51ff004); // ldr pc, [pc,#-4] ; L1
write32(buf + 4, 0x00000000); // L1: .word S
- target->relocateNoSym(buf + 4, R_ARM_ABS32, getARMThunkDestVA(destination));
+ ctx.target->relocateNoSym(buf + 4, R_ARM_ABS32,
+ getARMThunkDestVA(destination));
}
void ARMV5LongLdrPcThunk::addSymbols(ThunkSection &isec) {
@@ -828,7 +830,8 @@ void ARMV4ABSLongBXThunk::writeLong(uint8_t *buf) {
write32(buf + 0, 0xe59fc000); // ldr r12, [pc] ; L1
write32(buf + 4, 0xe12fff1c); // bx r12
write32(buf + 8, 0x00000000); // L1: .word S
- target->relocateNoSym(buf + 8, R_ARM_ABS32, getARMThunkDestVA(destination));
+ ctx.target->relocateNoSym(buf + 8, R_ARM_ABS32,
+ getARMThunkDestVA(destination));
}
void ARMV4ABSLongBXThunk::addSymbols(ThunkSection &isec) {
@@ -844,7 +847,8 @@ void ThumbV4ABSLongBXThunk::writeLong(uint8_t *buf) {
write16(buf + 2, 0xe7fd); // b #-6 ; Arm recommended sequence to follow bx pc
write32(buf + 4, 0xe51ff004); // ldr pc, [pc, #-4] ; L1
write32(buf + 8, 0x00000000); // L1: .word S
- target->relocateNoSym(buf + 8, R_ARM_ABS32, getARMThunkDestVA(destination));
+ ctx.target->relocateNoSym(buf + 8, R_ARM_ABS32,
+ getARMThunkDestVA(destination));
}
void ThumbV4ABSLongBXThunk::addSymbols(ThunkSection &isec) {
@@ -862,7 +866,8 @@ void ThumbV4ABSLongThunk::writeLong(uint8_t *buf) {
write32(buf + 4, 0xe59fc000); // ldr r12, [pc] ; L1
write32(buf + 8, 0xe12fff1c); // bx r12
write32(buf + 12, 0x00000000); // L1: .word S
- target->relocateNoSym(buf + 12, R_ARM_ABS32, getARMThunkDestVA(destination));
+ ctx.target->relocateNoSym(buf + 12, R_ARM_ABS32,
+ getARMThunkDestVA(destination));
}
void ThumbV4ABSLongThunk::addSymbols(ThunkSection &isec) {
@@ -881,7 +886,7 @@ void ARMV4PILongBXThunk::writeLong(uint8_t *buf) {
write32(buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 8)
uint64_t s = getARMThunkDestVA(destination);
uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
- target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12);
+ ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12);
}
void ARMV4PILongBXThunk::addSymbols(ThunkSection &isec) {
@@ -898,7 +903,7 @@ void ARMV4PILongThunk::writeLong(uint8_t *buf) {
write32(buf + 8, 0x00000000); // L2: .word S - (P + (L1 - P) + 8)
uint64_t s = getARMThunkDestVA(destination);
uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
- target->relocateNoSym(buf + 8, R_ARM_REL32, s - p - 12);
+ ctx.target->relocateNoSym(buf + 8, R_ARM_REL32, s - p - 12);
}
void ARMV4PILongThunk::addSymbols(ThunkSection &isec) {
@@ -917,7 +922,7 @@ void ThumbV4PILongBXThunk::writeLong(uint8_t *buf) {
write32(buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 8)
uint64_t s = getARMThunkDestVA(destination);
uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
- target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 16);
+ ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 16);
}
void ThumbV4PILongBXThunk::addSymbols(ThunkSection &isec) {
@@ -938,7 +943,7 @@ void ThumbV4PILongThunk::writeLong(uint8_t *buf) {
write32(buf + 16, 0x00000000); // L2: .word S - (P + (L1 - P) + 8)
uint64_t s = getARMThunkDestVA(destination);
uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
- target->relocateNoSym(buf + 16, R_ARM_REL32, s - p - 16);
+ ctx.target->relocateNoSym(buf + 16, R_ARM_REL32, s - p - 16);
}
void ThumbV4PILongThunk::addSymbols(ThunkSection &isec) {
@@ -953,7 +958,7 @@ void ThumbV4PILongThunk::addSymbols(ThunkSection &isec) {
// Use the long jump which covers a range up to 8MiB.
void AVRThunk::writeTo(uint8_t *buf) {
write32(buf, 0x940c); // jmp func
- target->relocateNoSym(buf, R_AVR_CALL, destination.getVA());
+ ctx.target->relocateNoSym(buf, R_AVR_CALL, destination.getVA());
}
void AVRThunk::addSymbols(ThunkSection &isec) {
@@ -968,8 +973,8 @@ void MipsThunk::writeTo(uint8_t *buf) {
write32(buf + 4, 0x08000000 | (s >> 2)); // j func
write32(buf + 8, 0x27390000); // addiu $25, $25, %lo(func)
write32(buf + 12, 0x00000000); // nop
- target->relocateNoSym(buf, R_MIPS_HI16, s);
- target->relocateNoSym(buf + 8, R_MIPS_LO16, s);
+ ctx.target->relocateNoSym(buf, R_MIPS_HI16, s);
+ ctx.target->relocateNoSym(buf + 8, R_MIPS_LO16, s);
}
void MipsThunk::addSymbols(ThunkSection &isec) {
@@ -990,9 +995,9 @@ void MicroMipsThunk::writeTo(uint8_t *buf) {
write16(buf + 4, 0xd400); // j func
write16(buf + 8, 0x3339); // addiu $25, $25, %lo(func)
write16(buf + 12, 0x0c00); // nop
- target->relocateNoSym(buf, R_MICROMIPS_HI16, s);
- target->relocateNoSym(buf + 4, R_MICROMIPS_26_S1, s);
- target->relocateNoSym(buf + 8, R_MICROMIPS_LO16, s);
+ ctx.target->relocateNoSym(buf, R_MICROMIPS_HI16, s);
+ ctx.target->relocateNoSym(buf + 4, R_MICROMIPS_26_S1, s);
+ ctx.target->relocateNoSym(buf + 8, R_MICROMIPS_LO16, s);
}
void MicroMipsThunk::addSymbols(ThunkSection &isec) {
@@ -1015,9 +1020,9 @@ void MicroMipsR6Thunk::writeTo(uint8_t *buf) {
write16(buf, 0x1320); // lui $25, %hi(func)
write16(buf + 4, 0x3339); // addiu $25, $25, %lo(func)
write16(buf + 8, 0x9400); // bc func
- target->relocateNoSym(buf, R_MICROMIPS_HI16, s);
- target->relocateNoSym(buf + 4, R_MICROMIPS_LO16, s);
- target->relocateNoSym(buf + 8, R_MICROMIPS_PC26_S1, s - p - 12);
+ ctx.target->relocateNoSym(buf, R_MICROMIPS_HI16, s);
+ ctx.target->relocateNoSym(buf + 4, R_MICROMIPS_LO16, s);
+ ctx.target->relocateNoSym(buf + 8, R_MICROMIPS_PC26_S1, s - p - 12);
}
void MicroMipsR6Thunk::addSymbols(ThunkSection &isec) {
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 087804b43918ab..0165253551714c 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -826,7 +826,7 @@ template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() {
// The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention usually
// to the start of the .got or .got.plt section.
InputSection *sec = in.gotPlt.get();
- if (!target->gotBaseSymInGotPlt)
+ if (!ctx.target->gotBaseSymInGotPlt)
sec = in.mipsGot ? cast<InputSection>(in.mipsGot.get())
: cast<InputSection>(in.got.get());
ctx.sym.globalOffsetTable->section = sec;
@@ -1177,8 +1177,8 @@ sortISDBySectionOrder(InputSectionDescription *isd,
// cover most cases).
size_t insPt = 0;
if (executableOutputSection && !orderedSections.empty() &&
- target->getThunkSectionSpacing() &&
- totalSize >= target->getThunkSectionSpacing()) {
+ ctx.target->getThunkSectionSpacing() &&
+ totalSize >= ctx.target->getThunkSectionSpacing()) {
uint64_t unorderedPos = 0;
for (; insPt != unorderedSections.size(); ++insPt) {
unorderedPos += unorderedSections[insPt]->getSize();
@@ -1455,9 +1455,9 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
uint32_t pass = 0, assignPasses = 0;
for (;;) {
- bool changed = target->needsThunks
+ bool changed = ctx.target->needsThunks
? tc.createThunks(pass, ctx.outputSections)
- : target->relaxOnce(pass);
+ : ctx.target->relaxOnce(pass);
bool spilled = ctx.script->spillSections();
changed |= spilled;
++pass;
@@ -1465,8 +1465,8 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
// With Thunk Size much smaller than branch range we expect to
// converge quickly; if we get to 30 something has gone wrong.
if (changed && pass >= 30) {
- error(target->needsThunks ? "thunk creation not converged"
- : "relaxation not converged");
+ error(ctx.target->needsThunks ? "thunk creation not converged"
+ : "relaxation not converged");
break;
}
@@ -1541,7 +1541,7 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
}
}
if (!config->relocatable)
- target->finalizeRelax(pass);
+ ctx.target->finalizeRelax(pass);
if (config->relocatable)
for (OutputSection *sec : ctx.outputSections)
@@ -1632,7 +1632,7 @@ template <class ELFT> void Writer<ELFT>::optimizeBasicBlockJumps() {
for (size_t i = 0, e = sections.size(); i != e; ++i) {
InputSection *next = i + 1 < sections.size() ? sections[i + 1] : nullptr;
InputSection &sec = *sections[i];
- numDeleted += target->deleteFallThruJmpInsn(sec, sec.file, next);
+ numDeleted += ctx.target->deleteFallThruJmpInsn(sec, sec.file, next);
}
if (numDeleted > 0) {
ctx.script->assignAddresses();
@@ -2803,7 +2803,7 @@ template <class ELFT> void Writer<ELFT>::writeSectionsBinary() {
static void fillTrap(uint8_t *i, uint8_t *end) {
for (; i + 4 <= end; i += 4)
- memcpy(i, &target->trapInstr, 4);
+ memcpy(i, &ctx.target->trapInstr, 4);
}
// Fill the last page of executable segments with trap instructions
More information about the llvm-commits
mailing list