[lld] e88b7ff - [ELF] Move InStruct into Ctx. NFC
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Sun Sep 15 22:15:07 PDT 2024
Author: Fangrui Song
Date: 2024-09-15T22:15:02-07:00
New Revision: e88b7ff01699a94b6458be942ff1b258f70efc2e
URL: https://github.com/llvm/llvm-project/commit/e88b7ff01699a94b6458be942ff1b258f70efc2e
DIFF: https://github.com/llvm/llvm-project/commit/e88b7ff01699a94b6458be942ff1b258f70efc2e.diff
LOG: [ELF] Move InStruct into Ctx. NFC
Ctx was introduced in March 2022 as a more suitable place for such
singletons.
llvm/Support/thread.h includes <thread>, which transitively includes
<sstream> in libc++ and uses ios_base::in, so we cannot use `#define in ctx.in`.
`symtab, config, ctx` are now the only variables using
LLVM_LIBRARY_VISIBILITY.
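
To make the macro pitfall concrete, here is a minimal standalone sketch (not part of this patch; InStruct and Ctx are reduced to hypothetical stubs). A transitional `#define in ctx.in` would rewrite every identifier `in` seen after the define, including std::ios_base::in pulled in transitively through <thread>, which is why the patch spells out ctx.in.* instead:

    #include <cstdio>

    struct InStruct { int plt = 0; }; // stand-in for lld's InStruct
    struct Ctx { InStruct in; };      // Ctx owns the singleton, as in this patch
    Ctx ctx;

    #define in ctx.in // the shortcut the commit message rules out

    // From here on the preprocessor rewrites every identifier `in`.
    // `in.plt` still works (it expands to ctx.in.plt), but a header included
    // after the define that names std::ios_base::in would expand to
    // std::ios_base::ctx.in and fail to compile -- hence the mass rename.
    int main() {
      in.plt = 42;                 // expands to ctx.in.plt = 42;
      std::printf("%d\n", in.plt); // prints 42
      return 0;
    }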
Added:
Modified:
lld/ELF/Arch/AArch64.cpp
lld/ELF/Arch/ARM.cpp
lld/ELF/Arch/Hexagon.cpp
lld/ELF/Arch/LoongArch.cpp
lld/ELF/Arch/Mips.cpp
lld/ELF/Arch/PPC.cpp
lld/ELF/Arch/PPC64.cpp
lld/ELF/Arch/RISCV.cpp
lld/ELF/Arch/SPARCV9.cpp
lld/ELF/Arch/SystemZ.cpp
lld/ELF/Arch/X86.cpp
lld/ELF/Arch/X86_64.cpp
lld/ELF/Config.h
lld/ELF/Driver.cpp
lld/ELF/InputFiles.cpp
lld/ELF/InputSection.cpp
lld/ELF/LinkerScript.cpp
lld/ELF/OutputSections.cpp
lld/ELF/Relocations.cpp
lld/ELF/Symbols.cpp
lld/ELF/SyntheticSections.cpp
lld/ELF/SyntheticSections.h
lld/ELF/Thunks.cpp
lld/ELF/Writer.cpp
Removed:
################################################################################
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index 7ed4bd2e307f05..36880bf67e9f23 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -322,7 +322,7 @@ int64_t AArch64::getImplicitAddend(const uint8_t *buf, RelType type) const {
}
void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
- write64(buf, in.plt->getVA());
+ write64(buf, ctx.in.plt->getVA());
}
void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
@@ -343,8 +343,8 @@ void AArch64::writePltHeader(uint8_t *buf) const {
};
memcpy(buf, pltData, sizeof(pltData));
- uint64_t got = in.gotPlt->getVA();
- uint64_t plt = in.plt->getVA();
+ uint64_t got = ctx.in.gotPlt->getVA();
+ uint64_t plt = ctx.in.plt->getVA();
relocateNoSym(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
getAArch64Page(got + 16) - getAArch64Page(plt + 4));
relocateNoSym(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
@@ -1003,8 +1003,8 @@ void AArch64BtiPac::writePltHeader(uint8_t *buf) const {
};
const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop
- uint64_t got = in.gotPlt->getVA();
- uint64_t plt = in.plt->getVA();
+ uint64_t got = ctx.in.gotPlt->getVA();
+ uint64_t plt = ctx.in.plt->getVA();
if (btiHeader) {
// PltHeader is called indirectly by plt[N]. Prefix pltData with a BTI C
diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index 827ba3a6c68a14..12b576979512b6 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -204,7 +204,7 @@ RelType ARM::getDynRel(RelType type) const {
}
void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const {
- write32(buf, in.plt->getVA());
+ write32(buf, ctx.in.plt->getVA());
}
void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
@@ -223,8 +223,8 @@ static void writePltHeaderLong(uint8_t *buf) {
write32(buf + 20, 0xd4d4d4d4); // Pad to 32-byte boundary
write32(buf + 24, 0xd4d4d4d4); // Pad to 32-byte boundary
write32(buf + 28, 0xd4d4d4d4);
- uint64_t gotPlt = in.gotPlt->getVA();
- uint64_t l1 = in.plt->getVA() + 8;
+ uint64_t gotPlt = ctx.in.gotPlt->getVA();
+ uint64_t l1 = ctx.in.plt->getVA() + 8;
write32(buf + 16, gotPlt - l1 - 8);
}
@@ -249,7 +249,7 @@ void ARM::writePltHeader(uint8_t *buf) const {
// At 0x8, we want to jump to .got.plt, the -16 accounts for 8 bytes from
// `pc` in the add instruction and 8 bytes for the `lr` adjustment.
//
- uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 16;
+ uint64_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA() - 16;
assert(llvm::isUInt<32>(offset) && "This should always fit into a 32-bit offset");
write16(buf + 0, 0xb500);
// Split into two halves to support endianness correctly.
@@ -277,7 +277,7 @@ void ARM::writePltHeader(uint8_t *buf) const {
0xe5bef000, // ldr pc, [lr, #0x00000NNN] &(.got.plt -L1 - 4)
};
- uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 4;
+ uint64_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA() - 4;
if (!llvm::isUInt<27>(offset)) {
// We cannot encode the Offset, use the long form.
writePltHeaderLong(buf);
diff --git a/lld/ELF/Arch/Hexagon.cpp b/lld/ELF/Arch/Hexagon.cpp
index abde3cd964917e..a492d0a630b46e 100644
--- a/lld/ELF/Arch/Hexagon.cpp
+++ b/lld/ELF/Arch/Hexagon.cpp
@@ -359,7 +359,7 @@ void Hexagon::writePltHeader(uint8_t *buf) const {
memcpy(buf, pltData, sizeof(pltData));
// Offset from PLT0 to the GOT.
- uint64_t off = in.gotPlt->getVA() - in.plt->getVA();
+ uint64_t off = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA();
relocateNoSym(buf, R_HEX_B32_PCREL_X, off);
relocateNoSym(buf + 4, R_HEX_6_PCREL_X, off);
}
diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index 01e42a5867b7ef..0044afb92cd412 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -308,9 +308,9 @@ int64_t LoongArch::getImplicitAddend(const uint8_t *buf, RelType type) const {
void LoongArch::writeGotPlt(uint8_t *buf, const Symbol &s) const {
if (config->is64)
- write64le(buf, in.plt->getVA());
+ write64le(buf, ctx.in.plt->getVA());
else
- write32le(buf, in.plt->getVA());
+ write32le(buf, ctx.in.plt->getVA());
}
void LoongArch::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
@@ -341,7 +341,7 @@ void LoongArch::writePltHeader(uint8_t *buf) const {
// srli.[wd] $t1, $t1, (is64?1:2) ; t1 = &.got.plt[i] - &.got.plt[0]
// ld.[wd] $t0, $t0, Wordsize ; t0 = link_map
// jr $t3
- uint32_t offset = in.gotPlt->getVA() - in.plt->getVA();
+ uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA();
uint32_t sub = config->is64 ? SUB_D : SUB_W;
uint32_t ld = config->is64 ? LD_D : LD_W;
uint32_t addi = config->is64 ? ADDI_D : ADDI_W;
diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp
index 7c7137117d78e4..f5920c75ca76d5 100644
--- a/lld/ELF/Arch/Mips.cpp
+++ b/lld/ELF/Arch/Mips.cpp
@@ -205,7 +205,7 @@ template <class ELFT> RelType MIPS<ELFT>::getDynRel(RelType type) const {
template <class ELFT>
void MIPS<ELFT>::writeGotPlt(uint8_t *buf, const Symbol &) const {
- uint64_t va = in.plt->getVA();
+ uint64_t va = ctx.in.plt->getVA();
if (isMicroMips())
va |= 1;
write32(buf, va);
@@ -257,8 +257,8 @@ static void writeMicroRelocation16(uint8_t *loc, uint64_t v, uint8_t bitsSize,
template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const {
if (isMicroMips()) {
- uint64_t gotPlt = in.gotPlt->getVA();
- uint64_t plt = in.plt->getVA();
+ uint64_t gotPlt = ctx.in.gotPlt->getVA();
+ uint64_t plt = ctx.in.plt->getVA();
// Overwrite trap instructions written by Writer::writeTrapInstr.
memset(buf, 0, pltHeaderSize);
@@ -310,7 +310,7 @@ template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const {
write32(buf + 24, jalrInst); // jalr.hb $25 or jalr $25
write32(buf + 28, 0x2718fffe); // subu $24, $24, 2
- uint64_t gotPlt = in.gotPlt->getVA();
+ uint64_t gotPlt = ctx.in.gotPlt->getVA();
writeValue(buf, gotPlt + 0x8000, 16, 16);
writeValue(buf + 4, gotPlt, 16, 0);
writeValue(buf + 8, gotPlt, 16, 0);
diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp
index 186dcf229b6f6d..53f760e5304dc0 100644
--- a/lld/ELF/Arch/PPC.cpp
+++ b/lld/ELF/Arch/PPC.cpp
@@ -75,9 +75,10 @@ static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
void elf::writePPC32GlinkSection(uint8_t *buf, size_t numEntries) {
// Create canonical PLT entries for non-PIE code. Compilers don't generate
// non-GOT-non-PLT relocations referencing external functions for -fpie/-fPIE.
- uint32_t glink = in.plt->getVA(); // VA of .glink
+ uint32_t glink = ctx.in.plt->getVA(); // VA of .glink
if (!config->isPic) {
- for (const Symbol *sym : cast<PPC32GlinkSection>(*in.plt).canonical_plts) {
+ for (const Symbol *sym :
+ cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts) {
writePPC32PltCallStub(buf, sym->getGotPltVA(), nullptr, 0);
buf += 16;
glink += 16;
@@ -101,10 +102,10 @@ void elf::writePPC32GlinkSection(uint8_t *buf, size_t numEntries) {
// Then write PLTresolve(), which has two forms: PIC and non-PIC. PLTresolve()
// computes the PLT index (by computing the distance from the landing b to
// itself) and calls _dl_runtime_resolve() (in glibc).
- uint32_t got = in.got->getVA();
+ uint32_t got = ctx.in.got->getVA();
const uint8_t *end = buf + 64;
if (config->isPic) {
- uint32_t afterBcl = 4 * in.plt->getNumEntries() + 12;
+ uint32_t afterBcl = 4 * ctx.in.plt->getNumEntries() + 12;
uint32_t gotBcl = got + 4 - (glink + afterBcl);
write32(buf + 0, 0x3d6b0000 | ha(afterBcl)); // addis r11,r11,1f-glink@ha
write32(buf + 4, 0x7c0802a6); // mflr r0
@@ -192,7 +193,8 @@ void PPC::writeGotHeader(uint8_t *buf) const {
void PPC::writeGotPlt(uint8_t *buf, const Symbol &s) const {
// Address of the symbol resolver stub in .glink .
- write32(buf, in.plt->getVA() + in.plt->headerSize + 4 * s.getPltIdx());
+ write32(buf,
+ ctx.in.plt->getVA() + ctx.in.plt->headerSize + 4 * s.getPltIdx());
}
bool PPC::needsThunk(RelExpr expr, RelType type, const InputFile *file,
diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index 15abbfda664331..e7617ca0760908 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -210,7 +210,7 @@ uint64_t elf::getPPC64TocBase() {
// TOC starts where the first of these sections starts. We always create a
// .got when we see a relocation that uses it, so for us the start is always
// the .got.
- uint64_t tocVA = in.got->getVA();
+ uint64_t tocVA = ctx.in.got->getVA();
// Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
// thus permitting a full 64 Kbytes segment. Note that the glibc startup
@@ -1155,7 +1155,7 @@ void PPC64::writePltHeader(uint8_t *buf) const {
// The 'bcl' instruction will set the link register to the address of the
// following instruction ('mflr r11'). Here we store the offset from that
// instruction to the first entry in the GotPlt section.
- int64_t gotPltOffset = in.gotPlt->getVA() - (in.plt->getVA() + 8);
+ int64_t gotPltOffset = ctx.in.gotPlt->getVA() - (ctx.in.plt->getVA() + 8);
write64(buf + 52, gotPltOffset);
}
diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index 2435864ce5a7f0..242189050e8cab 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -207,9 +207,9 @@ void RISCV::writeGotHeader(uint8_t *buf) const {
void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const {
if (config->is64)
- write64le(buf, in.plt->getVA());
+ write64le(buf, ctx.in.plt->getVA());
else
- write32le(buf, in.plt->getVA());
+ write32le(buf, ctx.in.plt->getVA());
}
void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
@@ -230,7 +230,7 @@ void RISCV::writePltHeader(uint8_t *buf) const {
// srli t1, t1, (rv64?1:2); t1 = &.got.plt[i] - &.got.plt[0]
// l[wd] t0, Wordsize(t0); t0 = link_map
// jr t3
- uint32_t offset = in.gotPlt->getVA() - in.plt->getVA();
+ uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA();
uint32_t load = config->is64 ? LD : LW;
write32le(buf + 0, utype(AUIPC, X_T2, hi20(offset)));
write32le(buf + 4, rtype(SUB, X_T1, X_T1, X_T3));
@@ -1178,8 +1178,8 @@ mergeAttributesSection(const SmallVector<InputSectionBase *, 0> &sections) {
unsigned firstStackAlignValue = 0, xlen = 0;
bool hasArch = false;
- in.riscvAttributes = std::make_unique<RISCVAttributesSection>();
- auto &merged = static_cast<RISCVAttributesSection &>(*in.riscvAttributes);
+ ctx.in.riscvAttributes = std::make_unique<RISCVAttributesSection>();
+ auto &merged = static_cast<RISCVAttributesSection &>(*ctx.in.riscvAttributes);
// Collect all tags values from attributes section.
const auto &attributesTags = RISCVAttrs::getRISCVAttributeTags();
diff --git a/lld/ELF/Arch/SPARCV9.cpp b/lld/ELF/Arch/SPARCV9.cpp
index 4ae742c6c4e9a4..f7f296c81f335d 100644
--- a/lld/ELF/Arch/SPARCV9.cpp
+++ b/lld/ELF/Arch/SPARCV9.cpp
@@ -188,7 +188,7 @@ void SPARCV9::writePlt(uint8_t *buf, const Symbol & /*sym*/,
};
memcpy(buf, pltData, sizeof(pltData));
- uint64_t off = pltEntryAddr - in.plt->getVA();
+ uint64_t off = pltEntryAddr - ctx.in.plt->getVA();
relocateNoSym(buf, R_SPARC_22, off);
relocateNoSym(buf + 4, R_SPARC_WDISP19, -(off + 4 - pltEntrySize));
}
diff --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp
index 293df50708952b..3d637477e59dfd 100644
--- a/lld/ELF/Arch/SystemZ.cpp
+++ b/lld/ELF/Arch/SystemZ.cpp
@@ -203,15 +203,15 @@ void SystemZ::writePltHeader(uint8_t *buf) const {
0x07, 0x00, // nopr
};
memcpy(buf, pltData, sizeof(pltData));
- uint64_t got = in.got->getVA();
- uint64_t plt = in.plt->getVA();
+ uint64_t got = ctx.in.got->getVA();
+ uint64_t plt = ctx.in.plt->getVA();
write32be(buf + 8, (got - plt - 6) >> 1);
}
void SystemZ::addPltHeaderSymbols(InputSection &isec) const {
// The PLT header needs a reference to _GLOBAL_OFFSET_TABLE_, so we
// must ensure the .got section is created even if otherwise unused.
- in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
+ ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
}
void SystemZ::writePlt(uint8_t *buf, const Symbol &sym,
@@ -228,8 +228,8 @@ void SystemZ::writePlt(uint8_t *buf, const Symbol &sym,
memcpy(buf, inst, sizeof(inst));
write32be(buf + 2, (sym.getGotPltVA() - pltEntryAddr) >> 1);
- write32be(buf + 24, (in.plt->getVA() - pltEntryAddr - 22) >> 1);
- write32be(buf + 28, in.relaPlt->entsize * sym.getPltIdx());
+ write32be(buf + 24, (ctx.in.plt->getVA() - pltEntryAddr - 22) >> 1);
+ write32be(buf + 28, ctx.in.relaPlt->entsize * sym.getPltIdx());
}
int64_t SystemZ::getImplicitAddend(const uint8_t *buf, RelType type) const {
diff --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp
index 20b69adc12bc05..6fb36233343918 100644
--- a/lld/ELF/Arch/X86.cpp
+++ b/lld/ELF/Arch/X86.cpp
@@ -203,14 +203,14 @@ void X86::writePltHeader(uint8_t *buf) const {
0x90, 0x90, 0x90, 0x90, // nop
};
memcpy(buf, pltData, sizeof(pltData));
- uint32_t gotPlt = in.gotPlt->getVA();
+ uint32_t gotPlt = ctx.in.gotPlt->getVA();
write32le(buf + 2, gotPlt + 4);
write32le(buf + 8, gotPlt + 8);
}
void X86::writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const {
- unsigned relOff = in.relaPlt->entsize * sym.getPltIdx();
+ unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx();
if (config->isPic) {
const uint8_t inst[] = {
0xff, 0xa3, 0, 0, 0, 0, // jmp *foo@GOT(%ebx)
@@ -218,7 +218,7 @@ void X86::writePlt(uint8_t *buf, const Symbol &sym,
0xe9, 0, 0, 0, 0, // jmp .PLT0@PC
};
memcpy(buf, inst, sizeof(inst));
- write32le(buf + 2, sym.getGotPltVA() - in.gotPlt->getVA());
+ write32le(buf + 2, sym.getGotPltVA() - ctx.in.gotPlt->getVA());
} else {
const uint8_t inst[] = {
0xff, 0x25, 0, 0, 0, 0, // jmp *foo@GOT
@@ -230,7 +230,7 @@ void X86::writePlt(uint8_t *buf, const Symbol &sym,
}
write32le(buf + 7, relOff);
- write32le(buf + 12, in.plt->getVA() - pltEntryAddr - 16);
+ write32le(buf + 12, ctx.in.plt->getVA() - pltEntryAddr - 16);
}
int64_t X86::getImplicitAddend(const uint8_t *buf, RelType type) const {
@@ -532,7 +532,7 @@ IntelIBT::IntelIBT() { pltHeaderSize = 0; }
void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {
uint64_t va =
- in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize;
+ ctx.in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize;
write32le(buf, va);
}
@@ -545,7 +545,7 @@ void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym,
0x66, 0x0f, 0x1f, 0x44, 0, 0, // nop
};
memcpy(buf, inst, sizeof(inst));
- write32le(buf + 6, sym.getGotPltVA() - in.gotPlt->getVA());
+ write32le(buf + 6, sym.getGotPltVA() - ctx.in.gotPlt->getVA());
return;
}
@@ -630,7 +630,7 @@ void RetpolinePic::writePltHeader(uint8_t *buf) const {
void RetpolinePic::writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const {
- unsigned relOff = in.relaPlt->entsize * sym.getPltIdx();
+ unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx();
const uint8_t insn[] = {
0x50, // pushl %eax
0x8b, 0x83, 0, 0, 0, 0, // mov foo at GOT(%ebx), %eax
@@ -642,8 +642,8 @@ void RetpolinePic::writePlt(uint8_t *buf, const Symbol &sym,
};
memcpy(buf, insn, sizeof(insn));
- uint32_t ebx = in.gotPlt->getVA();
- unsigned off = pltEntryAddr - in.plt->getVA();
+ uint32_t ebx = ctx.in.gotPlt->getVA();
+ unsigned off = pltEntryAddr - ctx.in.plt->getVA();
write32le(buf + 3, sym.getGotPltVA() - ebx);
write32le(buf + 8, -off - 12 + 32);
write32le(buf + 13, -off - 17 + 18);
@@ -682,14 +682,14 @@ void RetpolineNoPic::writePltHeader(uint8_t *buf) const {
};
memcpy(buf, insn, sizeof(insn));
- uint32_t gotPlt = in.gotPlt->getVA();
+ uint32_t gotPlt = ctx.in.gotPlt->getVA();
write32le(buf + 2, gotPlt + 4);
write32le(buf + 8, gotPlt + 8);
}
void RetpolineNoPic::writePlt(uint8_t *buf, const Symbol &sym,
uint64_t pltEntryAddr) const {
- unsigned relOff = in.relaPlt->entsize * sym.getPltIdx();
+ unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx();
const uint8_t insn[] = {
0x50, // 0: pushl %eax
0xa1, 0, 0, 0, 0, // 1: mov foo_in_GOT, %eax
@@ -702,7 +702,7 @@ void RetpolineNoPic::writePlt(uint8_t *buf, const Symbol &sym,
};
memcpy(buf, insn, sizeof(insn));
- unsigned off = pltEntryAddr - in.plt->getVA();
+ unsigned off = pltEntryAddr - ctx.in.plt->getVA();
write32le(buf + 2, sym.getGotPltVA());
write32le(buf + 7, -off - 11 + 32);
write32le(buf + 12, -off - 16 + 17);
diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp
index 65a81fe12f8709..950bac8b655135 100644
--- a/lld/ELF/Arch/X86_64.cpp
+++ b/lld/ELF/Arch/X86_64.cpp
@@ -432,8 +432,8 @@ void X86_64::writePltHeader(uint8_t *buf) const {
0x0f, 0x1f, 0x40, 0x00, // nop
};
memcpy(buf, pltData, sizeof(pltData));
- uint64_t gotPlt = in.gotPlt->getVA();
- uint64_t plt = in.ibtPlt ? in.ibtPlt->getVA() : in.plt->getVA();
+ uint64_t gotPlt = ctx.in.gotPlt->getVA();
+ uint64_t plt = ctx.in.ibtPlt ? ctx.in.ibtPlt->getVA() : ctx.in.plt->getVA();
write32le(buf + 2, gotPlt - plt + 2); // GOTPLT+8
write32le(buf + 8, gotPlt - plt + 4); // GOTPLT+16
}
@@ -449,7 +449,7 @@ void X86_64::writePlt(uint8_t *buf, const Symbol &sym,
write32le(buf + 2, sym.getGotPltVA() - pltEntryAddr - 6);
write32le(buf + 7, sym.getPltIdx());
- write32le(buf + 12, in.plt->getVA() - pltEntryAddr - 16);
+ write32le(buf + 12, ctx.in.plt->getVA() - pltEntryAddr - 16);
}
RelType X86_64::getDynRel(RelType type) const {
@@ -1073,7 +1073,7 @@ IntelIBT::IntelIBT() { pltHeaderSize = 0; }
void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {
uint64_t va =
- in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize;
+ ctx.in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize;
write64le(buf, va);
}
@@ -1162,8 +1162,8 @@ void Retpoline::writePltHeader(uint8_t *buf) const {
};
memcpy(buf, insn, sizeof(insn));
- uint64_t gotPlt = in.gotPlt->getVA();
- uint64_t plt = in.plt->getVA();
+ uint64_t gotPlt = ctx.in.gotPlt->getVA();
+ uint64_t plt = ctx.in.plt->getVA();
write32le(buf + 2, gotPlt - plt - 6 + 8);
write32le(buf + 9, gotPlt - plt - 13 + 16);
}
@@ -1180,7 +1180,7 @@ void Retpoline::writePlt(uint8_t *buf, const Symbol &sym,
};
memcpy(buf, insn, sizeof(insn));
- uint64_t off = pltEntryAddr - in.plt->getVA();
+ uint64_t off = pltEntryAddr - ctx.in.plt->getVA();
write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7);
write32le(buf + 8, -off - 12 + 32);
@@ -1221,7 +1221,7 @@ void RetpolineZNow::writePlt(uint8_t *buf, const Symbol &sym,
memcpy(buf, insn, sizeof(insn));
write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7);
- write32le(buf + 8, in.plt->getVA() - pltEntryAddr - 12);
+ write32le(buf + 8, ctx.in.plt->getVA() - pltEntryAddr - 12);
}
static TargetInfo *getTargetInfo() {
diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h
index 639bf9a4f22840..11bf0ec4d173ba 100644
--- a/lld/ELF/Config.h
+++ b/lld/ELF/Config.h
@@ -51,6 +51,26 @@ class TargetInfo;
struct Partition;
struct PhdrEntry;
+class BssSection;
+class GdbIndexSection;
+class GotPltSection;
+class GotSection;
+class IBTPltSection;
+class IgotPltSection;
+class InputSection;
+class IpltSection;
+class MipsGotSection;
+class MipsRldMapSection;
+class PPC32Got2Section;
+class PPC64LongBranchTargetSection;
+class PltSection;
+class RelocationBaseSection;
+class RelroPaddingSection;
+class StringTableSection;
+class SymbolTableBaseSection;
+class SymtabShndxSection;
+class SyntheticSection;
+
enum ELFKind : uint8_t {
ELFNoneKind,
ELF32LEKind,
@@ -483,6 +503,42 @@ struct DuplicateSymbol {
uint64_t value;
};
+// Linker generated sections which can be used as inputs and are not specific to
+// a partition.
+struct InStruct {
+ std::unique_ptr<InputSection> attributes;
+ std::unique_ptr<SyntheticSection> riscvAttributes;
+ std::unique_ptr<BssSection> bss;
+ std::unique_ptr<BssSection> bssRelRo;
+ std::unique_ptr<GotSection> got;
+ std::unique_ptr<GotPltSection> gotPlt;
+ std::unique_ptr<IgotPltSection> igotPlt;
+ std::unique_ptr<RelroPaddingSection> relroPadding;
+ std::unique_ptr<SyntheticSection> armCmseSGSection;
+ std::unique_ptr<PPC64LongBranchTargetSection> ppc64LongBranchTarget;
+ std::unique_ptr<SyntheticSection> mipsAbiFlags;
+ std::unique_ptr<MipsGotSection> mipsGot;
+ std::unique_ptr<SyntheticSection> mipsOptions;
+ std::unique_ptr<SyntheticSection> mipsReginfo;
+ std::unique_ptr<MipsRldMapSection> mipsRldMap;
+ std::unique_ptr<SyntheticSection> partEnd;
+ std::unique_ptr<SyntheticSection> partIndex;
+ std::unique_ptr<PltSection> plt;
+ std::unique_ptr<IpltSection> iplt;
+ std::unique_ptr<PPC32Got2Section> ppc32Got2;
+ std::unique_ptr<IBTPltSection> ibtPlt;
+ std::unique_ptr<RelocationBaseSection> relaPlt;
+ // Non-SHF_ALLOC sections
+ std::unique_ptr<SyntheticSection> debugNames;
+ std::unique_ptr<GdbIndexSection> gdbIndex;
+ std::unique_ptr<StringTableSection> shStrTab;
+ std::unique_ptr<StringTableSection> strTab;
+ std::unique_ptr<SymbolTableBaseSection> symTab;
+ std::unique_ptr<SymtabShndxSection> symTabShndx;
+
+ void reset();
+};
+
struct Ctx {
LinkerDriver driver;
LinkerScript *script;
@@ -504,6 +560,8 @@ struct Ctx {
SmallVector<OutputSection *, 0> outputSections;
std::vector<Partition> partitions;
+ InStruct in;
+
// Some linker-generated symbols need to be created as
// Defined symbols.
struct ElfSym {
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 54f9a7e02824f5..34b146759bea6f 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -103,6 +103,7 @@ void Ctx::reset() {
outputSections.clear();
partitions.clear();
+ ctx.in.reset();
sym = ElfSym{};
memoryBuffers.clear();
@@ -152,8 +153,6 @@ bool link(ArrayRef<const char *> args, llvm::raw_ostream &stdoutOS,
elf::ctx.partitions.emplace_back();
symtab = SymbolTable();
- in.reset();
-
SharedFile::vernauxNum = 0;
};
ctx->e.logName = args::getFilenameWithoutExe(args[0]);
diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp
index 76b04184854bb6..3f5f29591d2374 100644
--- a/lld/ELF/InputFiles.cpp
+++ b/lld/ELF/InputFiles.cpp
@@ -638,9 +638,9 @@ template <class ELFT> void ObjFile<ELFT>::parse(bool ignoreComdats) {
// dynamic loaders require the presence of an attribute section for
// dlopen to work. In a full implementation we would merge all attribute
// sections.
- if (in.attributes == nullptr) {
- in.attributes = std::make_unique<InputSection>(*this, sec, name);
- this->sections[i] = in.attributes.get();
+ if (ctx.in.attributes == nullptr) {
+ ctx.in.attributes = std::make_unique<InputSection>(*this, sec, name);
+ this->sections[i] = ctx.in.attributes.get();
}
}
}
diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index 03b91804c81543..9601e6b3250cc0 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -452,7 +452,7 @@ void InputSection::copyRelocations(uint8_t *buf,
// Output section VA is zero for -r, so r_offset is an offset within the
// section, but for --emit-relocs it is a virtual address.
p->r_offset = sec->getVA(rel.offset);
- p->setSymbolAndType(in.symTab->getSymbolIndex(sym), type,
+ p->setSymbolAndType(ctx.in.symTab->getSymbolIndex(sym), type,
config->isMips64EL);
if (sym.type == STT_SECTION) {
@@ -744,20 +744,20 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
// so we have to duplicate some logic here.
if (sym.hasFlag(NEEDS_TLSGD) && type != R_LARCH_TLS_IE_PC_LO12)
// Like R_LOONGARCH_TLSGD_PAGE_PC but taking the absolute value.
- return in.got->getGlobalDynAddr(sym) + a;
+ return ctx.in.got->getGlobalDynAddr(sym) + a;
return getRelocTargetVA(file, type, a, p, sym, R_GOT);
case R_GOTONLY_PC:
- return in.got->getVA() + a - p;
+ return ctx.in.got->getVA() + a - p;
case R_GOTPLTONLY_PC:
- return in.gotPlt->getVA() + a - p;
+ return ctx.in.gotPlt->getVA() + a - p;
case R_GOTREL:
case R_PPC64_RELAX_TOC:
- return sym.getVA(a) - in.got->getVA();
+ return sym.getVA(a) - ctx.in.got->getVA();
case R_GOTPLTREL:
- return sym.getVA(a) - in.gotPlt->getVA();
+ return sym.getVA(a) - ctx.in.gotPlt->getVA();
case R_GOTPLT:
case R_RELAX_TLS_GD_TO_IE_GOTPLT:
- return sym.getGotVA() + a - in.gotPlt->getVA();
+ return sym.getGotVA() + a - ctx.in.gotPlt->getVA();
case R_TLSLD_GOT_OFF:
case R_GOT_OFF:
case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
@@ -766,22 +766,23 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
return getAArch64Page(sym.getGotVA() + a) - getAArch64Page(p);
case R_AARCH64_GOT_PAGE:
- return sym.getGotVA() + a - getAArch64Page(in.got->getVA());
+ return sym.getGotVA() + a - getAArch64Page(ctx.in.got->getVA());
case R_GOT_PC:
case R_RELAX_TLS_GD_TO_IE:
return sym.getGotVA() + a - p;
case R_GOTPLT_GOTREL:
- return sym.getGotPltVA() + a - in.got->getVA();
+ return sym.getGotPltVA() + a - ctx.in.got->getVA();
case R_GOTPLT_PC:
return sym.getGotPltVA() + a - p;
case R_LOONGARCH_GOT_PAGE_PC:
if (sym.hasFlag(NEEDS_TLSGD))
- return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type);
+ return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(sym) + a, p,
+ type);
return getLoongArchPageDelta(sym.getGotVA() + a, p, type);
case R_MIPS_GOTREL:
- return sym.getVA(a) - in.mipsGot->getGp(file);
+ return sym.getVA(a) - ctx.in.mipsGot->getGp(file);
case R_MIPS_GOT_GP:
- return in.mipsGot->getGp(file) + a;
+ return ctx.in.mipsGot->getGp(file) + a;
case R_MIPS_GOT_GP_PC: {
// R_MIPS_LO16 expression has R_MIPS_GOT_GP_PC type iif the target
// is _gp_disp symbol. In that case we should use the following
@@ -790,7 +791,7 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
// microMIPS variants of these relocations use slightly different
// expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
// to correctly handle less-significant bit of the microMIPS symbol.
- uint64_t v = in.mipsGot->getGp(file) + a - p;
+ uint64_t v = ctx.in.mipsGot->getGp(file) + a - p;
if (type == R_MIPS_LO16 || type == R_MICROMIPS_LO16)
v += 4;
if (type == R_MICROMIPS_LO16 || type == R_MICROMIPS_HI16)
@@ -801,21 +802,24 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
// If relocation against MIPS local symbol requires GOT entry, this entry
// should be initialized by 'page address'. This address is high 16-bits
// of sum the symbol's value and the addend.
- return in.mipsGot->getVA() + in.mipsGot->getPageEntryOffset(file, sym, a) -
- in.mipsGot->getGp(file);
+ return ctx.in.mipsGot->getVA() +
+ ctx.in.mipsGot->getPageEntryOffset(file, sym, a) -
+ ctx.in.mipsGot->getGp(file);
case R_MIPS_GOT_OFF:
case R_MIPS_GOT_OFF32:
// In case of MIPS if a GOT relocation has non-zero addend this addend
// should be applied to the GOT entry content not to the GOT entry offset.
// That is why we use separate expression type.
- return in.mipsGot->getVA() + in.mipsGot->getSymEntryOffset(file, sym, a) -
- in.mipsGot->getGp(file);
+ return ctx.in.mipsGot->getVA() +
+ ctx.in.mipsGot->getSymEntryOffset(file, sym, a) -
+ ctx.in.mipsGot->getGp(file);
case R_MIPS_TLSGD:
- return in.mipsGot->getVA() + in.mipsGot->getGlobalDynOffset(file, sym) -
- in.mipsGot->getGp(file);
+ return ctx.in.mipsGot->getVA() +
+ ctx.in.mipsGot->getGlobalDynOffset(file, sym) -
+ ctx.in.mipsGot->getGp(file);
case R_MIPS_TLSLD:
- return in.mipsGot->getVA() + in.mipsGot->getTlsIndexOffset(file) -
- in.mipsGot->getGp(file);
+ return ctx.in.mipsGot->getVA() + ctx.in.mipsGot->getTlsIndexOffset(file) -
+ ctx.in.mipsGot->getGp(file);
case R_AARCH64_PAGE_PC: {
uint64_t val = sym.isUndefWeak() ? p + a : sym.getVA(a);
return getAArch64Page(val) - getAArch64Page(p);
@@ -864,9 +868,9 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
case R_LOONGARCH_PLT_PAGE_PC:
return getLoongArchPageDelta(sym.getPltVA() + a, p, type);
case R_PLT_GOTPLT:
- return sym.getPltVA() + a - in.gotPlt->getVA();
+ return sym.getPltVA() + a - ctx.in.gotPlt->getVA();
case R_PLT_GOTREL:
- return sym.getPltVA() + a - in.got->getVA();
+ return sym.getPltVA() + a - ctx.in.got->getVA();
case R_PPC32_PLTREL:
// R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
// stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
@@ -912,29 +916,32 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
case R_SIZE:
return sym.getSize() + a;
case R_TLSDESC:
- return in.got->getTlsDescAddr(sym) + a;
+ return ctx.in.got->getTlsDescAddr(sym) + a;
case R_TLSDESC_PC:
- return in.got->getTlsDescAddr(sym) + a - p;
+ return ctx.in.got->getTlsDescAddr(sym) + a - p;
case R_TLSDESC_GOTPLT:
- return in.got->getTlsDescAddr(sym) + a - in.gotPlt->getVA();
+ return ctx.in.got->getTlsDescAddr(sym) + a - ctx.in.gotPlt->getVA();
case R_AARCH64_TLSDESC_PAGE:
- return getAArch64Page(in.got->getTlsDescAddr(sym) + a) - getAArch64Page(p);
+ return getAArch64Page(ctx.in.got->getTlsDescAddr(sym) + a) -
+ getAArch64Page(p);
case R_LOONGARCH_TLSDESC_PAGE_PC:
- return getLoongArchPageDelta(in.got->getTlsDescAddr(sym) + a, p, type);
+ return getLoongArchPageDelta(ctx.in.got->getTlsDescAddr(sym) + a, p, type);
case R_TLSGD_GOT:
- return in.got->getGlobalDynOffset(sym) + a;
+ return ctx.in.got->getGlobalDynOffset(sym) + a;
case R_TLSGD_GOTPLT:
- return in.got->getGlobalDynAddr(sym) + a - in.gotPlt->getVA();
+ return ctx.in.got->getGlobalDynAddr(sym) + a - ctx.in.gotPlt->getVA();
case R_TLSGD_PC:
- return in.got->getGlobalDynAddr(sym) + a - p;
+ return ctx.in.got->getGlobalDynAddr(sym) + a - p;
case R_LOONGARCH_TLSGD_PAGE_PC:
- return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type);
+ return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(sym) + a, p,
+ type);
case R_TLSLD_GOTPLT:
- return in.got->getVA() + in.got->getTlsIndexOff() + a - in.gotPlt->getVA();
+ return ctx.in.got->getVA() + ctx.in.got->getTlsIndexOff() + a -
+ ctx.in.gotPlt->getVA();
case R_TLSLD_GOT:
- return in.got->getTlsIndexOff() + a;
+ return ctx.in.got->getTlsIndexOff() + a;
case R_TLSLD_PC:
- return in.got->getTlsIndexVA() + a - p;
+ return ctx.in.got->getTlsIndexVA() + a - p;
default:
llvm_unreachable("invalid expression");
}
diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp
index 4d7ffd0b990223..3e8f375fa17031 100644
--- a/lld/ELF/LinkerScript.cpp
+++ b/lld/ELF/LinkerScript.cpp
@@ -649,7 +649,7 @@ LinkerScript::computeInputSections(const InputSectionDescription *cmd,
}
void LinkerScript::discard(InputSectionBase &s) {
- if (&s == in.shStrTab.get())
+ if (&s == ctx.in.shStrTab.get())
error("discarding " + s.name + " section is not allowed");
s.markDead();
@@ -1038,7 +1038,7 @@ void LinkerScript::diagnoseOrphanHandling() const {
for (const InputSectionBase *sec : orphanSections) {
// .relro_padding is inserted before DATA_SEGMENT_RELRO_END, if present,
// automatically. The section is not supposed to be specified by scripts.
- if (sec == in.relroPadding.get())
+ if (sec == ctx.in.relroPadding.get())
continue;
// Input SHT_REL[A] retained by --emit-relocs are ignored by
// computeInputSections(). Don't warn/error.
@@ -1055,7 +1055,7 @@ void LinkerScript::diagnoseOrphanHandling() const {
}
void LinkerScript::diagnoseMissingSGSectionAddress() const {
- if (!config->cmseImplib || !in.armCmseSGSection->isNeeded())
+ if (!config->cmseImplib || !ctx.in.armCmseSGSection->isNeeded())
return;
OutputSection *sec = findByName(sectionCommands, ".gnu.sgstubs");
@@ -1237,7 +1237,7 @@ bool LinkerScript::assignOffsets(OutputSection *sec) {
// If .relro_padding is present, round up the end to a common-page-size
// boundary to protect the last page.
- if (in.relroPadding && sec == in.relroPadding->getParent())
+ if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent())
expandOutputSection(alignToPowerOf2(dot, config->commonPageSize) - dot);
// Non-SHF_ALLOC sections do not affect the addresses of other OutputSections
@@ -1361,7 +1361,8 @@ void LinkerScript::adjustOutputSections() {
// Discard .relro_padding if we have not seen one RELRO section. Note: when
// .tbss is the only RELRO section, there is no associated PT_LOAD segment
// (needsPtLoad), so we don't append .relro_padding in the case.
- if (in.relroPadding && in.relroPadding->getParent() == sec && !seenRelro)
+ if (ctx.in.relroPadding && ctx.in.relroPadding->getParent() == sec &&
+ !seenRelro)
discardable = true;
if (discardable) {
sec->markDead();
diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp
index cb17e107d6dae2..1aede8df6f66f6 100644
--- a/lld/ELF/OutputSections.cpp
+++ b/lld/ELF/OutputSections.cpp
@@ -22,6 +22,7 @@
#include "llvm/Support/Parallel.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
+#undef in
#if LLVM_ENABLE_ZLIB
// Avoid introducing max as a macro from Windows headers.
#define NOMINMAX
@@ -584,7 +585,7 @@ void OutputSection::writeTo(uint8_t *buf, parallel::TaskGroup &tg) {
static void finalizeShtGroup(OutputSection *os, InputSection *section) {
// sh_link field for SHT_GROUP sections should contain the section index of
// the symbol table.
- os->link = in.symTab->getParent()->sectionIndex;
+ os->link = ctx.in.symTab->getParent()->sectionIndex;
if (!section)
return;
@@ -592,7 +593,7 @@ static void finalizeShtGroup(OutputSection *os, InputSection *section) {
// sh_info then contain index of an entry in symbol table section which
// provides signature of the section group.
ArrayRef<Symbol *> symbols = section->file->getSymbols();
- os->info = in.symTab->getSymbolIndex(*symbols[section->info]);
+ os->info = ctx.in.symTab->getSymbolIndex(*symbols[section->info]);
// Some group members may be combined or discarded, so we need to compute the
// new size. The content will be rewritten in InputSection::copyShtGroup.
@@ -610,7 +611,7 @@ encodeOneCrel(raw_svector_ostream &os, Elf_Crel<sizeof(uint) == 8> &out,
uint offset, const Symbol &sym, uint32_t type, uint addend) {
const auto deltaOffset = static_cast<uint64_t>(offset - out.r_offset);
out.r_offset = offset;
- int64_t symidx = in.symTab->getSymbolIndex(sym);
+ int64_t symidx = ctx.in.symTab->getSymbolIndex(sym);
if (sym.type == STT_SECTION) {
auto *d = dyn_cast<Defined>(&sym);
if (d) {
@@ -731,7 +732,7 @@ void OutputSection::finalize() {
if (!first || isa<SyntheticSection>(first))
return;
- link = in.symTab->getParent()->sectionIndex;
+ link = ctx.in.symTab->getParent()->sectionIndex;
// sh_info for SHT_REL[A] sections should contain the section header index of
// the section to which the relocation applies.
InputSectionBase *s = first->getRelocatedSection();
@@ -881,8 +882,8 @@ void OutputSection::checkDynRelAddends(const uint8_t *bufStart) {
// Some targets have NOBITS synthetic sections with dynamic relocations
// with non-zero addends. Skip such sections.
if (is_contained({EM_PPC, EM_PPC64}, config->emachine) &&
- (rel.inputSec == in.ppc64LongBranchTarget.get() ||
- rel.inputSec == in.igotPlt.get()))
+ (rel.inputSec == ctx.in.ppc64LongBranchTarget.get() ||
+ rel.inputSec == ctx.in.igotPlt.get()))
continue;
const uint8_t *relocTarget =
bufStart + relOsec->offset + rel.inputSec->getOffset(rel.offsetInSec);
diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index 565a5c2f582fce..6c07051a231537 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -382,7 +382,7 @@ template <class ELFT> static void addCopyRelSymbol(SharedSymbol &ss) {
bool isRO = isReadOnly<ELFT>(ss);
BssSection *sec =
make<BssSection>(isRO ? ".bss.rel.ro" : ".bss", symSize, ss.alignment);
- OutputSection *osec = (isRO ? in.bssRelRo : in.bss)->getParent();
+ OutputSection *osec = (isRO ? ctx.in.bssRelRo : ctx.in.bss)->getParent();
// At this point, sectionBases has been migrated to sections. Append sec to
// sections.
@@ -922,12 +922,12 @@ static void addPltEntry(PltSection &plt, GotPltSection &gotPlt,
}
void elf::addGotEntry(Symbol &sym) {
- in.got->addEntry(sym);
+ ctx.in.got->addEntry(sym);
uint64_t off = sym.getGotOffset();
// If preemptible, emit a GLOB_DAT relocation.
if (sym.isPreemptible) {
- ctx.mainPart->relaDyn->addReloc({ctx.target->gotRel, in.got.get(), off,
+ ctx.mainPart->relaDyn->addReloc({ctx.target->gotRel, ctx.in.got.get(), off,
DynamicReloc::AgainstSymbol, sym, 0,
R_ABS});
return;
@@ -936,20 +936,20 @@ void elf::addGotEntry(Symbol &sym) {
// Otherwise, the value is either a link-time constant or the load base
// plus a constant.
if (!config->isPic || isAbsolute(sym))
- in.got->addConstant({R_ABS, ctx.target->symbolicRel, off, 0, &sym});
+ ctx.in.got->addConstant({R_ABS, ctx.target->symbolicRel, off, 0, &sym});
else
- addRelativeReloc(*in.got, off, sym, 0, R_ABS, ctx.target->symbolicRel);
+ addRelativeReloc(*ctx.in.got, off, sym, 0, R_ABS, ctx.target->symbolicRel);
}
static void addTpOffsetGotEntry(Symbol &sym) {
- in.got->addEntry(sym);
+ ctx.in.got->addEntry(sym);
uint64_t off = sym.getGotOffset();
if (!sym.isPreemptible && !config->shared) {
- in.got->addConstant({R_TPREL, ctx.target->symbolicRel, off, 0, &sym});
+ ctx.in.got->addConstant({R_TPREL, ctx.target->symbolicRel, off, 0, &sym});
return;
}
ctx.mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
- ctx.target->tlsGotRel, *in.got, off, sym, ctx.target->symbolicRel);
+ ctx.target->tlsGotRel, *ctx.in.got, off, sym, ctx.target->symbolicRel);
}
// Return true if we can define a symbol in the executable that
@@ -1077,7 +1077,7 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
// If the target adjusted the expression to R_RELAX_GOT_PC, we may end up
// needing the GOT if we can't relax everything.
if (expr == R_RELAX_GOT_PC)
- in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
+ ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
}
}
@@ -1100,7 +1100,7 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
// See "Global Offset Table" in Chapter 5 in the following document
// for detailed description:
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
- in.mipsGot->addEntry(*sec->file, sym, addend, expr);
+ ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr);
} else if (!sym.isTls() || config->emachine != EM_LOONGARCH) {
// Many LoongArch TLS relocs reuse the R_LOONGARCH_GOT type, in which
// case the NEEDS_GOT flag shouldn't get set.
@@ -1190,7 +1190,7 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
// a dynamic relocation.
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf p.4-19
if (config->emachine == EM_MIPS)
- in.mipsGot->addEntry(*sec->file, sym, addend, expr);
+ ctx.in.mipsGot->addEntry(*sec->file, sym, addend, expr);
return;
}
}
@@ -1275,12 +1275,12 @@ static unsigned handleMipsTlsRelocation(RelType type, Symbol &sym,
InputSectionBase &c, uint64_t offset,
int64_t addend, RelExpr expr) {
if (expr == R_MIPS_TLSLD) {
- in.mipsGot->addTlsIndex(*c.file);
+ ctx.in.mipsGot->addTlsIndex(*c.file);
c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
if (expr == R_MIPS_TLSGD) {
- in.mipsGot->addDynTlsEntry(*c.file, sym);
+ ctx.in.mipsGot->addDynTlsEntry(*c.file, sym);
c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
@@ -1526,10 +1526,10 @@ void RelocationScanner::scanOne(typename Relocs<RelTy>::const_iterator &i) {
// The 5 types that relative GOTPLT are all x86 and x86-64 specific.
if (oneof<R_GOTPLTONLY_PC, R_GOTPLTREL, R_GOTPLT, R_PLT_GOTPLT,
R_TLSDESC_GOTPLT, R_TLSGD_GOTPLT>(expr)) {
- in.gotPlt->hasGotPltOffRel.store(true, std::memory_order_relaxed);
+ ctx.in.gotPlt->hasGotPltOffRel.store(true, std::memory_order_relaxed);
} else if (oneof<R_GOTONLY_PC, R_GOTREL, R_PPC32_PLTREL, R_PPC64_TOCBASE,
R_PPC64_RELAX_TOC>(expr)) {
- in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
+ ctx.in.got->hasGotOffRel.store(true, std::memory_order_relaxed);
}
// Process TLS relocations, including TLS optimizations. Note that
@@ -1732,15 +1732,16 @@ static bool handleNonPreemptibleIfunc(Symbol &sym, uint16_t flags) {
auto *directSym = makeDefined(cast<Defined>(sym));
directSym->allocateAux();
auto &dyn =
- config->androidPackDynRelocs ? *in.relaPlt : *ctx.mainPart->relaDyn;
- addPltEntry(*in.iplt, *in.igotPlt, dyn, ctx.target->iRelativeRel, *directSym);
+ config->androidPackDynRelocs ? *ctx.in.relaPlt : *ctx.mainPart->relaDyn;
+ addPltEntry(*ctx.in.iplt, *ctx.in.igotPlt, dyn, ctx.target->iRelativeRel,
+ *directSym);
sym.allocateAux();
ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx;
if (flags & HAS_DIRECT_RELOC) {
// Change the value to the IPLT and redirect all references to it.
auto &d = cast<Defined>(sym);
- d.section = in.iplt.get();
+ d.section = ctx.in.iplt.get();
d.value = d.getPltIdx() * ctx.target->ipltEntrySize;
d.size = 0;
// It's important to set the symbol type here so that dynamic loaders
@@ -1772,7 +1773,8 @@ void elf::postScanRelocations() {
if (flags & NEEDS_GOT)
addGotEntry(sym);
if (flags & NEEDS_PLT)
- addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt, ctx.target->pltRel, sym);
+ addPltEntry(*ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt,
+ ctx.target->pltRel, sym);
if (flags & NEEDS_COPY) {
if (sym.isObject()) {
invokeELFT(addCopyRelSymbol, cast<SharedSymbol>(sym));
@@ -1782,16 +1784,16 @@ void elf::postScanRelocations() {
} else {
assert(sym.isFunc() && sym.hasFlag(NEEDS_PLT));
if (!sym.isDefined()) {
- replaceWithDefined(sym, *in.plt,
+ replaceWithDefined(sym, *ctx.in.plt,
ctx.target->pltHeaderSize +
ctx.target->pltEntrySize * sym.getPltIdx(),
0);
sym.setFlags(NEEDS_COPY);
if (config->emachine == EM_PPC) {
// PPC32 canonical PLT entries are at the beginning of .glink
- cast<Defined>(sym).value = in.plt->headerSize;
- in.plt->headerSize += 16;
- cast<PPC32GlinkSection>(*in.plt).canonical_plts.push_back(&sym);
+ cast<Defined>(sym).value = ctx.in.plt->headerSize;
+ ctx.in.plt->headerSize += 16;
+ cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts.push_back(&sym);
}
}
}
@@ -1800,7 +1802,7 @@ void elf::postScanRelocations() {
if (!sym.isTls())
return;
bool isLocalInExecutable = !sym.isPreemptible && !config->shared;
- GotSection *got = in.got.get();
+ GotSection *got = ctx.in.got.get();
if (flags & NEEDS_TLSDESC) {
got->addTlsDescEntry(sym);
@@ -1842,7 +1844,7 @@ void elf::postScanRelocations() {
addTpOffsetGotEntry(sym);
};
- GotSection *got = in.got.get();
+ GotSection *got = ctx.in.got.get();
if (ctx.needsTlsLd.load(std::memory_order_relaxed) && got->addTlsIndex()) {
static Undefined dummy(ctx.internalFile, "", STB_LOCAL, 0, 0);
if (config->shared)
@@ -2379,7 +2381,7 @@ void elf::hexagonTLSSymbolUpdate(ArrayRef<OutputSection *> outputSections) {
if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
if (needEntry) {
sym->allocateAux();
- addPltEntry(*in.plt, *in.gotPlt, *in.relaPlt,
+ addPltEntry(*ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt,
ctx.target->pltRel, *sym);
needEntry = false;
}
diff --git a/lld/ELF/Symbols.cpp b/lld/ELF/Symbols.cpp
index b08c679ab36850..cd3fdce06f7788 100644
--- a/lld/ELF/Symbols.cpp
+++ b/lld/ELF/Symbols.cpp
@@ -147,8 +147,8 @@ uint64_t Symbol::getVA(int64_t addend) const {
uint64_t Symbol::getGotVA() const {
if (gotInIgot)
- return in.igotPlt->getVA() + getGotPltOffset();
- return in.got->getVA() + getGotOffset();
+ return ctx.in.igotPlt->getVA() + getGotPltOffset();
+ return ctx.in.got->getVA() + getGotOffset();
}
uint64_t Symbol::getGotOffset() const {
@@ -157,8 +157,8 @@ uint64_t Symbol::getGotOffset() const {
uint64_t Symbol::getGotPltVA() const {
if (isInIplt)
- return in.igotPlt->getVA() + getGotPltOffset();
- return in.gotPlt->getVA() + getGotPltOffset();
+ return ctx.in.igotPlt->getVA() + getGotPltOffset();
+ return ctx.in.gotPlt->getVA() + getGotPltOffset();
}
uint64_t Symbol::getGotPltOffset() const {
@@ -170,8 +170,8 @@ uint64_t Symbol::getGotPltOffset() const {
uint64_t Symbol::getPltVA() const {
uint64_t outVA =
- isInIplt ? in.iplt->getVA() + getPltIdx() * ctx.target->ipltEntrySize
- : in.plt->getVA() + in.plt->headerSize +
+ isInIplt ? ctx.in.iplt->getVA() + getPltIdx() * ctx.target->ipltEntrySize
+ : ctx.in.plt->getVA() + ctx.in.plt->headerSize +
getPltIdx() * ctx.target->pltEntrySize;
// While linking microMIPS code PLT code are always microMIPS
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index 3b75863ac67b01..2239c3645ae921 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -168,7 +168,7 @@ template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *buf) {
options->size = getSize();
if (!config->relocatable)
- reginfo.ri_gp_value = in.mipsGot->getGp();
+ reginfo.ri_gp_value = ctx.in.mipsGot->getGp();
memcpy(buf + sizeof(Elf_Mips_Options), &reginfo, sizeof(reginfo));
}
@@ -225,7 +225,7 @@ MipsReginfoSection<ELFT>::MipsReginfoSection(Elf_Mips_RegInfo reginfo)
template <class ELFT> void MipsReginfoSection<ELFT>::writeTo(uint8_t *buf) {
if (!config->relocatable)
- reginfo.ri_gp_value = in.mipsGot->getGp();
+ reginfo.ri_gp_value = ctx.in.mipsGot->getGp();
memcpy(buf, &reginfo, sizeof(reginfo));
}
@@ -273,8 +273,8 @@ Defined *elf::addSyntheticLocal(StringRef name, uint8_t type, uint64_t value,
uint64_t size, InputSectionBase &section) {
Defined *s = makeDefined(section.file, name, STB_LOCAL, STV_DEFAULT, type,
value, size, &section);
- if (in.symTab)
- in.symTab->addSymbol(s);
+ if (ctx.in.symTab)
+ ctx.in.symTab->addSymbol(s);
if (config->emachine == EM_ARM && !config->isLE && config->armBe8 &&
(section.flags & SHF_EXECINSTR))
@@ -1295,14 +1295,14 @@ DynamicSection<ELFT>::DynamicSection()
// The output section .rela.dyn may include these synthetic sections:
//
// - part.relaDyn
-// - in.relaPlt: this is included if a linker script places .rela.plt inside
+// - ctx.in.relaPlt: this is included if a linker script places .rela.plt inside
// .rela.dyn
//
// DT_RELASZ is the total size of the included sections.
static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) {
size_t size = relaDyn.getSize();
- if (in.relaPlt->getParent() == relaDyn.getParent())
- size += in.relaPlt->getSize();
+ if (ctx.in.relaPlt->getParent() == relaDyn.getParent())
+ size += ctx.in.relaPlt->getSize();
return size;
}
@@ -1310,7 +1310,7 @@ static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) {
// output section. When this occurs we cannot just use the OutputSection
// Size. Moreover the [DT_JMPREL, DT_JMPREL + DT_PLTRELSZ) is permitted to
// overlap with the [DT_RELA, DT_RELA + DT_RELASZ).
-static uint64_t addPltRelSz() { return in.relaPlt->getSize(); }
+static uint64_t addPltRelSz() { return ctx.in.relaPlt->getSize(); }
// Add remaining entries to complete .dynamic contents.
template <class ELFT>
@@ -1430,36 +1430,36 @@ DynamicSection<ELFT>::computeContents() {
addInt(DT_AARCH64_AUTH_RELRSZ, part.relrAuthDyn->getParent()->size);
addInt(DT_AARCH64_AUTH_RELRENT, sizeof(Elf_Relr));
}
- if (isMain && in.relaPlt->isNeeded()) {
- addInSec(DT_JMPREL, *in.relaPlt);
+ if (isMain && ctx.in.relaPlt->isNeeded()) {
+ addInSec(DT_JMPREL, *ctx.in.relaPlt);
entries.emplace_back(DT_PLTRELSZ, addPltRelSz());
switch (config->emachine) {
case EM_MIPS:
- addInSec(DT_MIPS_PLTGOT, *in.gotPlt);
+ addInSec(DT_MIPS_PLTGOT, *ctx.in.gotPlt);
break;
case EM_S390:
- addInSec(DT_PLTGOT, *in.got);
+ addInSec(DT_PLTGOT, *ctx.in.got);
break;
case EM_SPARCV9:
- addInSec(DT_PLTGOT, *in.plt);
+ addInSec(DT_PLTGOT, *ctx.in.plt);
break;
case EM_AARCH64:
- if (llvm::find_if(in.relaPlt->relocs, [](const DynamicReloc &r) {
+ if (llvm::find_if(ctx.in.relaPlt->relocs, [](const DynamicReloc &r) {
return r.type == ctx.target->pltRel &&
r.sym->stOther & STO_AARCH64_VARIANT_PCS;
- }) != in.relaPlt->relocs.end())
+ }) != ctx.in.relaPlt->relocs.end())
addInt(DT_AARCH64_VARIANT_PCS, 0);
- addInSec(DT_PLTGOT, *in.gotPlt);
+ addInSec(DT_PLTGOT, *ctx.in.gotPlt);
break;
case EM_RISCV:
- if (llvm::any_of(in.relaPlt->relocs, [](const DynamicReloc &r) {
+ if (llvm::any_of(ctx.in.relaPlt->relocs, [](const DynamicReloc &r) {
return r.type == ctx.target->pltRel &&
(r.sym->stOther & STO_RISCV_VARIANT_CC);
}))
addInt(DT_RISCV_VARIANT_CC, 0);
[[fallthrough]];
default:
- addInSec(DT_PLTGOT, *in.gotPlt);
+ addInSec(DT_PLTGOT, *ctx.in.gotPlt);
break;
}
addInt(DT_PLTREL, config->isRela ? DT_RELA : DT_REL);
@@ -1537,33 +1537,34 @@ DynamicSection<ELFT>::computeContents() {
addInt(DT_MIPS_FLAGS, RHF_NOTPOT);
addInt(DT_MIPS_BASE_ADDRESS, ctx.target->getImageBase());
addInt(DT_MIPS_SYMTABNO, part.dynSymTab->getNumSymbols());
- addInt(DT_MIPS_LOCAL_GOTNO, in.mipsGot->getLocalEntriesNum());
+ addInt(DT_MIPS_LOCAL_GOTNO, ctx.in.mipsGot->getLocalEntriesNum());
- if (const Symbol *b = in.mipsGot->getFirstGlobalEntry())
+ if (const Symbol *b = ctx.in.mipsGot->getFirstGlobalEntry())
addInt(DT_MIPS_GOTSYM, b->dynsymIndex);
else
addInt(DT_MIPS_GOTSYM, part.dynSymTab->getNumSymbols());
- addInSec(DT_PLTGOT, *in.mipsGot);
- if (in.mipsRldMap) {
+ addInSec(DT_PLTGOT, *ctx.in.mipsGot);
+ if (ctx.in.mipsRldMap) {
if (!config->pie)
- addInSec(DT_MIPS_RLD_MAP, *in.mipsRldMap);
+ addInSec(DT_MIPS_RLD_MAP, *ctx.in.mipsRldMap);
// Store the offset to the .rld_map section
// relative to the address of the tag.
addInt(DT_MIPS_RLD_MAP_REL,
- in.mipsRldMap->getVA() - (getVA() + entries.size() * entsize));
+ ctx.in.mipsRldMap->getVA() - (getVA() + entries.size() * entsize));
}
}
// DT_PPC_GOT indicates to glibc Secure PLT is used. If DT_PPC_GOT is absent,
// glibc assumes the old-style BSS PLT layout which we don't support.
if (config->emachine == EM_PPC)
- addInSec(DT_PPC_GOT, *in.got);
+ addInSec(DT_PPC_GOT, *ctx.in.got);
// Glink dynamic tag is required by the V2 abi if the plt section isn't empty.
- if (config->emachine == EM_PPC64 && in.plt->isNeeded()) {
+ if (config->emachine == EM_PPC64 && ctx.in.plt->isNeeded()) {
// The Glink tag points to 32 bytes before the first lazy symbol resolution
// stub, which starts directly after the header.
- addInt(DT_PPC64_GLINK, in.plt->getVA() + ctx.target->pltHeaderSize - 32);
+ addInt(DT_PPC64_GLINK,
+ ctx.in.plt->getVA() + ctx.target->pltHeaderSize - 32);
}
if (config->emachine == EM_PPC64)
@@ -1685,9 +1686,9 @@ void RelocationBaseSection::finalizeContents() {
else
getParent()->link = 0;
- if (in.relaPlt.get() == this && in.gotPlt->getParent()) {
+ if (ctx.in.relaPlt.get() == this && ctx.in.gotPlt->getParent()) {
getParent()->flags |= ELF::SHF_INFO_LINK;
- getParent()->info = in.gotPlt->getParent()->sectionIndex;
+ getParent()->info = ctx.in.gotPlt->getParent()->sectionIndex;
}
}
@@ -2331,7 +2332,7 @@ void SymtabShndxSection::writeTo(uint8_t *buf) {
// with an entry in .symtab. If the corresponding entry contains SHN_XINDEX,
// we need to write actual index, otherwise, we must write SHN_UNDEF(0).
buf += 4; // Ignore .symtab[0] entry.
- for (const SymbolTableEntry &entry : in.symTab->getSymbols()) {
+ for (const SymbolTableEntry &entry : ctx.in.symTab->getSymbols()) {
if (!getCommonSec(entry.sym) && getSymSectionIndex(entry.sym) == SHN_XINDEX)
write32(buf, entry.sym->getOutputSection()->sectionIndex);
buf += 4;
@@ -2352,11 +2353,11 @@ bool SymtabShndxSection::isNeeded() const {
}
void SymtabShndxSection::finalizeContents() {
- getParent()->link = in.symTab->getParent()->sectionIndex;
+ getParent()->link = ctx.in.symTab->getParent()->sectionIndex;
}
size_t SymtabShndxSection::getSize() const {
- return in.symTab->getNumSymbols() * 4;
+ return ctx.in.symTab->getNumSymbols() * 4;
}
// .hash and .gnu.hash sections contain on-disk hash tables that map
@@ -2583,7 +2584,7 @@ size_t PltSection::getSize() const {
bool PltSection::isNeeded() const {
// For -z retpolineplt, .iplt needs the .plt header.
- return !entries.empty() || (config->zRetpolineplt && in.iplt->isNeeded());
+ return !entries.empty() || (config->zRetpolineplt && ctx.in.iplt->isNeeded());
}
// Used by ARM to add mapping symbols in the PLT section, which aid
@@ -2708,15 +2709,15 @@ IBTPltSection::IBTPltSection()
: SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16, ".plt") {}
void IBTPltSection::writeTo(uint8_t *buf) {
- ctx.target->writeIBTPlt(buf, in.plt->getNumEntries());
+ ctx.target->writeIBTPlt(buf, ctx.in.plt->getNumEntries());
}
size_t IBTPltSection::getSize() const {
// 16 is the header size of .plt.
- return 16 + in.plt->getNumEntries() * ctx.target->pltEntrySize;
+ return 16 + ctx.in.plt->getNumEntries() * ctx.target->pltEntrySize;
}
-bool IBTPltSection::isNeeded() const { return in.plt->getNumEntries() > 0; }
+bool IBTPltSection::isNeeded() const { return ctx.in.plt->getNumEntries() > 0; }
RelroPaddingSection::RelroPaddingSection()
: SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_NOBITS, 1, ".relro_padding") {
@@ -4455,7 +4456,7 @@ void PartitionIndexSection::writeTo(uint8_t *buf) {
write32(buf + 4, ctx.partitions[i].elfHeader->getVA() - (va + 4));
SyntheticSection *next = i == ctx.partitions.size() - 1
- ? in.partEnd.get()
+ ? ctx.in.partEnd.get()
: ctx.partitions[i + 1].elfHeader.get();
write32(buf + 8, next->getVA() - ctx.partitions[i].elfHeader->getVA());
@@ -4667,41 +4668,41 @@ template <class ELFT> void elf::createSyntheticSections() {
auto add = [](SyntheticSection &sec) { ctx.inputSections.push_back(&sec); };
if (config->zSectionHeader)
- in.shStrTab = std::make_unique<StringTableSection>(".shstrtab", false);
+ ctx.in.shStrTab = std::make_unique<StringTableSection>(".shstrtab", false);
ctx.out.programHeaders = make<OutputSection>("", 0, SHF_ALLOC);
ctx.out.programHeaders->addralign = config->wordsize;
if (config->strip != StripPolicy::All) {
- in.strTab = std::make_unique<StringTableSection>(".strtab", false);
- in.symTab = std::make_unique<SymbolTableSection<ELFT>>(*in.strTab);
- in.symTabShndx = std::make_unique<SymtabShndxSection>();
+ ctx.in.strTab = std::make_unique<StringTableSection>(".strtab", false);
+ ctx.in.symTab = std::make_unique<SymbolTableSection<ELFT>>(*ctx.in.strTab);
+ ctx.in.symTabShndx = std::make_unique<SymtabShndxSection>();
}
- in.bss = std::make_unique<BssSection>(".bss", 0, 1);
- add(*in.bss);
+ ctx.in.bss = std::make_unique<BssSection>(".bss", 0, 1);
+ add(*ctx.in.bss);
// If there is a SECTIONS command and a .data.rel.ro section name use name
// .data.rel.ro.bss so that we match in the .data.rel.ro output section.
// This makes sure our relro is contiguous.
bool hasDataRelRo =
ctx.script->hasSectionsCommand && findSection(".data.rel.ro");
- in.bssRelRo = std::make_unique<BssSection>(
+ ctx.in.bssRelRo = std::make_unique<BssSection>(
hasDataRelRo ? ".data.rel.ro.bss" : ".bss.rel.ro", 0, 1);
- add(*in.bssRelRo);
+ add(*ctx.in.bssRelRo);
// Add MIPS-specific sections.
if (config->emachine == EM_MIPS) {
if (!config->shared && config->hasDynSymTab) {
- in.mipsRldMap = std::make_unique<MipsRldMapSection>();
- add(*in.mipsRldMap);
+ ctx.in.mipsRldMap = std::make_unique<MipsRldMapSection>();
+ add(*ctx.in.mipsRldMap);
}
- if ((in.mipsAbiFlags = MipsAbiFlagsSection<ELFT>::create()))
- add(*in.mipsAbiFlags);
- if ((in.mipsOptions = MipsOptionsSection<ELFT>::create()))
- add(*in.mipsOptions);
- if ((in.mipsReginfo = MipsReginfoSection<ELFT>::create()))
- add(*in.mipsReginfo);
+ if ((ctx.in.mipsAbiFlags = MipsAbiFlagsSection<ELFT>::create()))
+ add(*ctx.in.mipsAbiFlags);
+ if ((ctx.in.mipsOptions = MipsOptionsSection<ELFT>::create()))
+ add(*ctx.in.mipsOptions);
+ if ((ctx.in.mipsReginfo = MipsReginfoSection<ELFT>::create()))
+ add(*ctx.in.mipsReginfo);
}
StringRef relaDynName = config->isRela ? ".rela.dyn" : ".rel.dyn";
@@ -4815,97 +4816,98 @@ template <class ELFT> void elf::createSyntheticSections() {
// Create the partition end marker. This needs to be in partition number 255
// so that it is sorted after all other partitions. It also has other
// special handling (see createPhdrs() and combineEhSections()).
- in.partEnd =
+ ctx.in.partEnd =
std::make_unique<BssSection>(".part.end", config->maxPageSize, 1);
- in.partEnd->partition = 255;
- add(*in.partEnd);
+ ctx.in.partEnd->partition = 255;
+ add(*ctx.in.partEnd);
- in.partIndex = std::make_unique<PartitionIndexSection>();
- addOptionalRegular("__part_index_begin", in.partIndex.get(), 0);
- addOptionalRegular("__part_index_end", in.partIndex.get(),
- in.partIndex->getSize());
- add(*in.partIndex);
+ ctx.in.partIndex = std::make_unique<PartitionIndexSection>();
+ addOptionalRegular("__part_index_begin", ctx.in.partIndex.get(), 0);
+ addOptionalRegular("__part_index_end", ctx.in.partIndex.get(),
+ ctx.in.partIndex->getSize());
+ add(*ctx.in.partIndex);
}
// Add .got. MIPS' .got is so different from the other archs,
// it has its own class.
if (config->emachine == EM_MIPS) {
- in.mipsGot = std::make_unique<MipsGotSection>();
- add(*in.mipsGot);
+ ctx.in.mipsGot = std::make_unique<MipsGotSection>();
+ add(*ctx.in.mipsGot);
} else {
- in.got = std::make_unique<GotSection>();
- add(*in.got);
+ ctx.in.got = std::make_unique<GotSection>();
+ add(*ctx.in.got);
}
if (config->emachine == EM_PPC) {
- in.ppc32Got2 = std::make_unique<PPC32Got2Section>();
- add(*in.ppc32Got2);
+ ctx.in.ppc32Got2 = std::make_unique<PPC32Got2Section>();
+ add(*ctx.in.ppc32Got2);
}
if (config->emachine == EM_PPC64) {
- in.ppc64LongBranchTarget = std::make_unique<PPC64LongBranchTargetSection>();
- add(*in.ppc64LongBranchTarget);
+ ctx.in.ppc64LongBranchTarget =
+ std::make_unique<PPC64LongBranchTargetSection>();
+ add(*ctx.in.ppc64LongBranchTarget);
}
- in.gotPlt = std::make_unique<GotPltSection>();
- add(*in.gotPlt);
- in.igotPlt = std::make_unique<IgotPltSection>();
- add(*in.igotPlt);
+ ctx.in.gotPlt = std::make_unique<GotPltSection>();
+ add(*ctx.in.gotPlt);
+ ctx.in.igotPlt = std::make_unique<IgotPltSection>();
+ add(*ctx.in.igotPlt);
// Add .relro_padding if DATA_SEGMENT_RELRO_END is used; otherwise, add the
// section in the absence of PHDRS/SECTIONS commands.
if (config->zRelro &&
((ctx.script->phdrsCommands.empty() && !ctx.script->hasSectionsCommand) ||
ctx.script->seenRelroEnd)) {
- in.relroPadding = std::make_unique<RelroPaddingSection>();
- add(*in.relroPadding);
+ ctx.in.relroPadding = std::make_unique<RelroPaddingSection>();
+ add(*ctx.in.relroPadding);
}
if (config->emachine == EM_ARM) {
- in.armCmseSGSection = std::make_unique<ArmCmseSGSection>();
- add(*in.armCmseSGSection);
+ ctx.in.armCmseSGSection = std::make_unique<ArmCmseSGSection>();
+ add(*ctx.in.armCmseSGSection);
}
// _GLOBAL_OFFSET_TABLE_ is defined relative to either .got.plt or .got. Treat
// it as a relocation and ensure the referenced section is created.
if (ctx.sym.globalOffsetTable && config->emachine != EM_MIPS) {
if (ctx.target->gotBaseSymInGotPlt)
- in.gotPlt->hasGotPltOffRel = true;
+ ctx.in.gotPlt->hasGotPltOffRel = true;
else
- in.got->hasGotOffRel = true;
+ ctx.in.got->hasGotOffRel = true;
}
// We always need to add rel[a].plt to output if it has entries.
// Even for static linking it can contain R_[*]_IRELATIVE relocations.
- in.relaPlt = std::make_unique<RelocationSection<ELFT>>(
+ ctx.in.relaPlt = std::make_unique<RelocationSection<ELFT>>(
config->isRela ? ".rela.plt" : ".rel.plt", /*sort=*/false,
/*threadCount=*/1);
- add(*in.relaPlt);
+ add(*ctx.in.relaPlt);
if ((config->emachine == EM_386 || config->emachine == EM_X86_64) &&
(config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT)) {
- in.ibtPlt = std::make_unique<IBTPltSection>();
- add(*in.ibtPlt);
+ ctx.in.ibtPlt = std::make_unique<IBTPltSection>();
+ add(*ctx.in.ibtPlt);
}
if (config->emachine == EM_PPC)
- in.plt = std::make_unique<PPC32GlinkSection>();
+ ctx.in.plt = std::make_unique<PPC32GlinkSection>();
else
- in.plt = std::make_unique<PltSection>();
- add(*in.plt);
- in.iplt = std::make_unique<IpltSection>();
- add(*in.iplt);
+ ctx.in.plt = std::make_unique<PltSection>();
+ add(*ctx.in.plt);
+ ctx.in.iplt = std::make_unique<IpltSection>();
+ add(*ctx.in.iplt);
if (config->andFeatures || !ctx.aarch64PauthAbiCoreInfo.empty())
add(*make<GnuPropertySection>());
if (config->debugNames) {
- in.debugNames = std::make_unique<DebugNamesSection<ELFT>>();
- add(*in.debugNames);
+ ctx.in.debugNames = std::make_unique<DebugNamesSection<ELFT>>();
+ add(*ctx.in.debugNames);
}
if (config->gdbIndex) {
- in.gdbIndex = GdbIndexSection::create<ELFT>();
- add(*in.gdbIndex);
+ ctx.in.gdbIndex = GdbIndexSection::create<ELFT>();
+ add(*ctx.in.gdbIndex);
}
// .note.GNU-stack is always added when we are creating a re-linkable
@@ -4916,18 +4918,16 @@ template <class ELFT> void elf::createSyntheticSections() {
if (config->relocatable)
add(*make<GnuStackSection>());
- if (in.symTab)
- add(*in.symTab);
- if (in.symTabShndx)
- add(*in.symTabShndx);
- if (in.shStrTab)
- add(*in.shStrTab);
- if (in.strTab)
- add(*in.strTab);
+ if (ctx.in.symTab)
+ add(*ctx.in.symTab);
+ if (ctx.in.symTabShndx)
+ add(*ctx.in.symTabShndx);
+ if (ctx.in.shStrTab)
+ add(*ctx.in.shStrTab);
+ if (ctx.in.strTab)
+ add(*ctx.in.strTab);
}
-InStruct elf::in;
-
template void elf::splitSections<ELF32LE>();
template void elf::splitSections<ELF32BE>();
template void elf::splitSections<ELF64LE>();
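(The hunks above delete the file-scope `InStruct elf::in` definition and route every former `in.foo` access through the linker-wide `ctx` object as `ctx.in.foo`. As a rough, standalone illustration of that singleton-to-context pattern only -- toy names, not LLD's real classes -- the shape of the change is:

#include <cstdint>
#include <iostream>
#include <memory>

// Toy stand-in for a linker-generated section; illustrative only.
struct GotSection {
  uint64_t getVA() const { return 0x1000; }
};

// Members that used to hang off a file-scope singleton now live in a struct
// owned by the context object, so all such state has a single root.
struct InStruct {
  std::unique_ptr<GotSection> got;
};

struct Ctx {
  InStruct in; // callers write ctx.in.got, mirroring the ctx.in.* rewrites above
};

Ctx ctx;

int main() {
  ctx.in.got = std::make_unique<GotSection>();
  std::cout << std::hex << ctx.in.got->getVA() << '\n'; // prints 1000
}
)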
diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h
index 4bfa5cd73d35ee..8e2664d64118b7 100644
--- a/lld/ELF/SyntheticSections.h
+++ b/lld/ELF/SyntheticSections.h
@@ -1479,44 +1479,6 @@ inline Partition &SectionBase::getPartition() const {
return ctx.partitions[partition - 1];
}
-// Linker generated sections which can be used as inputs and are not specific to
-// a partition.
-struct InStruct {
- std::unique_ptr<InputSection> attributes;
- std::unique_ptr<SyntheticSection> riscvAttributes;
- std::unique_ptr<BssSection> bss;
- std::unique_ptr<BssSection> bssRelRo;
- std::unique_ptr<GotSection> got;
- std::unique_ptr<GotPltSection> gotPlt;
- std::unique_ptr<IgotPltSection> igotPlt;
- std::unique_ptr<RelroPaddingSection> relroPadding;
- std::unique_ptr<SyntheticSection> armCmseSGSection;
- std::unique_ptr<PPC64LongBranchTargetSection> ppc64LongBranchTarget;
- std::unique_ptr<SyntheticSection> mipsAbiFlags;
- std::unique_ptr<MipsGotSection> mipsGot;
- std::unique_ptr<SyntheticSection> mipsOptions;
- std::unique_ptr<SyntheticSection> mipsReginfo;
- std::unique_ptr<MipsRldMapSection> mipsRldMap;
- std::unique_ptr<SyntheticSection> partEnd;
- std::unique_ptr<SyntheticSection> partIndex;
- std::unique_ptr<PltSection> plt;
- std::unique_ptr<IpltSection> iplt;
- std::unique_ptr<PPC32Got2Section> ppc32Got2;
- std::unique_ptr<IBTPltSection> ibtPlt;
- std::unique_ptr<RelocationBaseSection> relaPlt;
- // Non-SHF_ALLOC sections
- std::unique_ptr<SyntheticSection> debugNames;
- std::unique_ptr<GdbIndexSection> gdbIndex;
- std::unique_ptr<StringTableSection> shStrTab;
- std::unique_ptr<StringTableSection> strTab;
- std::unique_ptr<SymbolTableBaseSection> symTab;
- std::unique_ptr<SymtabShndxSection> symTabShndx;
-
- void reset();
-};
-
-LLVM_LIBRARY_VISIBILITY extern InStruct in;
-
} // namespace lld::elf
#endif
diff --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp
index 1b814039cf10da..349e31332f59aa 100644
--- a/lld/ELF/Thunks.cpp
+++ b/lld/ELF/Thunks.cpp
@@ -473,9 +473,9 @@ class PPC64PILongBranchThunk final : public PPC64LongBranchThunk {
: PPC64LongBranchThunk(dest, addend) {
assert(!dest.isPreemptible);
if (std::optional<uint32_t> index =
- in.ppc64LongBranchTarget->addEntry(&dest, addend)) {
+ ctx.in.ppc64LongBranchTarget->addEntry(&dest, addend)) {
ctx.mainPart->relaDyn->addRelativeReloc(
- ctx.target->relativeRel, *in.ppc64LongBranchTarget,
+ ctx.target->relativeRel, *ctx.in.ppc64LongBranchTarget,
*index * UINT64_C(8), dest,
addend + getPPC64GlobalEntryToLocalEntryOffset(dest.stOther),
ctx.target->symbolicRel, R_ABS);
@@ -487,7 +487,7 @@ class PPC64PDLongBranchThunk final : public PPC64LongBranchThunk {
public:
PPC64PDLongBranchThunk(Symbol &dest, int64_t addend)
: PPC64LongBranchThunk(dest, addend) {
- in.ppc64LongBranchTarget->addEntry(&dest, addend);
+ ctx.in.ppc64LongBranchTarget->addEntry(&dest, addend);
}
};
@@ -1052,12 +1052,12 @@ void elf::writePPC32PltCallStub(uint8_t *buf, uint64_t gotPltVA,
// almost always 0x8000. The address of .got2 is different in another object
// file, so a stub cannot be shared.
offset = gotPltVA -
- (in.ppc32Got2->getParent()->getVA() +
+ (ctx.in.ppc32Got2->getParent()->getVA() +
(file->ppc32Got2 ? file->ppc32Got2->outSecOff : 0) + addend);
} else {
// The stub loads an address relative to _GLOBAL_OFFSET_TABLE_ (which is
// currently the address of .got).
- offset = gotPltVA - in.got->getVA();
+ offset = gotPltVA - ctx.in.got->getVA();
}
uint16_t ha = (offset + 0x8000) >> 16, l = (uint16_t)offset;
if (ha == 0) {
@@ -1176,9 +1176,9 @@ void PPC64R2SaveStub::writeTo(uint8_t *buf) {
write32(buf + nextInstOffset, MTCTR_R12); // mtctr r12
write32(buf + nextInstOffset + 4, BCTR); // bctr
} else {
- in.ppc64LongBranchTarget->addEntry(&destination, addend);
+ ctx.in.ppc64LongBranchTarget->addEntry(&destination, addend);
const int64_t offsetFromTOC =
- in.ppc64LongBranchTarget->getEntryVA(&destination, addend) -
+ ctx.in.ppc64LongBranchTarget->getEntryVA(&destination, addend) -
getPPC64TocBase();
writePPC64LoadAndBranch(buf + 4, offsetFromTOC);
}
@@ -1238,8 +1238,9 @@ bool PPC64R12SetupStub::isCompatibleWith(const InputSection &isec,
}
void PPC64LongBranchThunk::writeTo(uint8_t *buf) {
- int64_t offset = in.ppc64LongBranchTarget->getEntryVA(&destination, addend) -
- getPPC64TocBase();
+ int64_t offset =
+ ctx.in.ppc64LongBranchTarget->getEntryVA(&destination, addend) -
+ getPPC64TocBase();
writePPC64LoadAndBranch(buf, offset);
}
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index d00586bc939901..8999fdc1b29af3 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -485,8 +485,8 @@ static void demoteAndCopyLocalSymbols() {
if (dr->section && !dr->section->isLive())
demoteDefined(*dr, sectionIndexMap);
- else if (in.symTab && includeInSymtab(*b) && shouldKeepInSymtab(*dr))
- in.symTab->addSymbol(b);
+ else if (ctx.in.symTab && includeInSymtab(*b) && shouldKeepInSymtab(*dr))
+ ctx.in.symTab->addSymbol(b);
}
}
}
@@ -529,9 +529,9 @@ template <class ELFT> void Writer<ELFT>::addSectionSymbols() {
// Set the symbol to be relative to the output section so that its st_value
// equals the output section address. Note, there may be a gap between the
// start of the output section and isec.
- in.symTab->addSymbol(makeDefined(isec->file, "", STB_LOCAL, /*stOther=*/0,
- STT_SECTION,
- /*value=*/0, /*size=*/0, &osec));
+ ctx.in.symTab->addSymbol(makeDefined(isec->file, "", STB_LOCAL,
+ /*stOther=*/0, STT_SECTION,
+ /*value=*/0, /*size=*/0, &osec));
}
}
@@ -578,7 +578,7 @@ static bool isRelroSection(const OutputSection *sec) {
// .got contains pointers to external symbols. They are resolved by
// the dynamic linker when a module is loaded into memory, and after
// that they are not expected to change. So, it can be in RELRO.
- if (in.got && sec == in.got->getParent())
+ if (ctx.in.got && sec == ctx.in.got->getParent())
return true;
// .toc is a GOT-ish section for PowerPC64. Their contents are accessed
@@ -593,10 +593,10 @@ static bool isRelroSection(const OutputSection *sec) {
// by default resolved lazily, so we usually cannot put it into RELRO.
// However, if "-z now" is given, the lazy symbol resolution is
// disabled, which enables us to put it into RELRO.
- if (sec == in.gotPlt->getParent())
+ if (sec == ctx.in.gotPlt->getParent())
return config->zNow;
- if (in.relroPadding && sec == in.relroPadding->getParent())
+ if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent())
return true;
// .dynamic section contains data for the dynamic linker, and
@@ -825,10 +825,10 @@ template <class ELFT> void Writer<ELFT>::setReservedSymbolSections() {
if (ctx.sym.globalOffsetTable) {
// The _GLOBAL_OFFSET_TABLE_ symbol is defined by target convention usually
// to the start of the .got or .got.plt section.
- InputSection *sec = in.gotPlt.get();
+ InputSection *sec = ctx.in.gotPlt.get();
if (!ctx.target->gotBaseSymInGotPlt)
- sec = in.mipsGot ? cast<InputSection>(in.mipsGot.get())
- : cast<InputSection>(in.got.get());
+ sec = ctx.in.mipsGot ? cast<InputSection>(ctx.in.mipsGot.get())
+ : cast<InputSection>(ctx.in.got.get());
ctx.sym.globalOffsetTable->section = sec;
}
@@ -953,7 +953,7 @@ findOrphanPos(SmallVectorImpl<SectionCommand *>::iterator b,
// As a special case, place .relro_padding before the SymbolAssignment using
// DATA_SEGMENT_RELRO_END, if present.
- if (in.relroPadding && sec == in.relroPadding->getParent()) {
+ if (ctx.in.relroPadding && sec == ctx.in.relroPadding->getParent()) {
auto i = std::find_if(b, e, [=](SectionCommand *a) {
if (auto *assign = dyn_cast<SymbolAssignment>(a))
return assign->dataSegmentRelroEnd;
@@ -1481,9 +1481,9 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
changed |= a32p.createFixes();
}
- finalizeSynthetic(in.got.get());
- if (in.mipsGot)
- in.mipsGot->updateAllocSize();
+ finalizeSynthetic(ctx.in.got.get());
+ if (ctx.in.mipsGot)
+ ctx.in.mipsGot->updateAllocSize();
for (Partition &part : ctx.partitions) {
// The R_AARCH64_AUTH_RELATIVE has a smaller addend field as bits [63:32]
@@ -1805,10 +1805,10 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
reportUndefinedSymbols();
postScanRelocations();
- if (in.plt && in.plt->isNeeded())
- in.plt->addSymbols();
- if (in.iplt && in.iplt->isNeeded())
- in.iplt->addSymbols();
+ if (ctx.in.plt && ctx.in.plt->isNeeded())
+ ctx.in.plt->addSymbols();
+ if (ctx.in.iplt && ctx.in.iplt->isNeeded())
+ ctx.in.iplt->addSymbols();
if (config->unresolvedSymbolsInShlib != UnresolvedPolicy::Ignore) {
auto diagnose =
@@ -1861,8 +1861,8 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
continue;
if (!config->relocatable)
sym->binding = sym->computeBinding();
- if (in.symTab)
- in.symTab->addSymbol(sym);
+ if (ctx.in.symTab)
+ ctx.in.symTab->addSymbol(sym);
if (sym->includeInDynsym()) {
ctx.partitions[sym->partition - 1].dynSymTab->addSymbol(sym);
@@ -1886,8 +1886,8 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
}
}
- if (in.mipsGot)
- in.mipsGot->build();
+ if (ctx.in.mipsGot)
+ ctx.in.mipsGot->build();
removeUnusedSyntheticSections();
ctx.script->diagnoseOrphanHandling();
@@ -1896,16 +1896,17 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
sortSections();
// Create a list of OutputSections, assign sectionIndex, and populate
- // in.shStrTab. If -z nosectionheader is specified, drop non-ALLOC sections.
+ // ctx.in.shStrTab. If -z nosectionheader is specified, drop non-ALLOC
+ // sections.
for (SectionCommand *cmd : ctx.script->sectionCommands)
if (auto *osd = dyn_cast<OutputDesc>(cmd)) {
OutputSection *osec = &osd->osec;
- if (!in.shStrTab && !(osec->flags & SHF_ALLOC))
+ if (!ctx.in.shStrTab && !(osec->flags & SHF_ALLOC))
continue;
ctx.outputSections.push_back(osec);
osec->sectionIndex = ctx.outputSections.size();
- if (in.shStrTab)
- osec->shName = in.shStrTab->addString(osec->name);
+ if (ctx.in.shStrTab)
+ osec->shName = ctx.in.shStrTab->addString(osec->name);
}
// Prefer command line supplied address over other constraints.
@@ -1977,20 +1978,20 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
{
llvm::TimeTraceScope timeScope("Finalize synthetic sections");
- finalizeSynthetic(in.bss.get());
- finalizeSynthetic(in.bssRelRo.get());
- finalizeSynthetic(in.symTabShndx.get());
- finalizeSynthetic(in.shStrTab.get());
- finalizeSynthetic(in.strTab.get());
- finalizeSynthetic(in.got.get());
- finalizeSynthetic(in.mipsGot.get());
- finalizeSynthetic(in.igotPlt.get());
- finalizeSynthetic(in.gotPlt.get());
- finalizeSynthetic(in.relaPlt.get());
- finalizeSynthetic(in.plt.get());
- finalizeSynthetic(in.iplt.get());
- finalizeSynthetic(in.ppc32Got2.get());
- finalizeSynthetic(in.partIndex.get());
+ finalizeSynthetic(ctx.in.bss.get());
+ finalizeSynthetic(ctx.in.bssRelRo.get());
+ finalizeSynthetic(ctx.in.symTabShndx.get());
+ finalizeSynthetic(ctx.in.shStrTab.get());
+ finalizeSynthetic(ctx.in.strTab.get());
+ finalizeSynthetic(ctx.in.got.get());
+ finalizeSynthetic(ctx.in.mipsGot.get());
+ finalizeSynthetic(ctx.in.igotPlt.get());
+ finalizeSynthetic(ctx.in.gotPlt.get());
+ finalizeSynthetic(ctx.in.relaPlt.get());
+ finalizeSynthetic(ctx.in.plt.get());
+ finalizeSynthetic(ctx.in.iplt.get());
+ finalizeSynthetic(ctx.in.ppc32Got2.get());
+ finalizeSynthetic(ctx.in.partIndex.get());
// Dynamic section must be the last one in this list and dynamic
// symbol table section (dynSymTab) must be the first one.
@@ -2055,14 +2056,14 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
llvm::TimeTraceScope timeScope("Finalize synthetic sections");
// finalizeAddressDependentContent may have added local symbols to the
// static symbol table.
- finalizeSynthetic(in.symTab.get());
- finalizeSynthetic(in.debugNames.get());
- finalizeSynthetic(in.ppc64LongBranchTarget.get());
- finalizeSynthetic(in.armCmseSGSection.get());
+ finalizeSynthetic(ctx.in.symTab.get());
+ finalizeSynthetic(ctx.in.debugNames.get());
+ finalizeSynthetic(ctx.in.ppc64LongBranchTarget.get());
+ finalizeSynthetic(ctx.in.armCmseSGSection.get());
}
// Relaxation to delete inter-basic block jumps created by basic block
- // sections. Run after in.symTab is finalized as optimizeBasicBlockJumps
+ // sections. Run after ctx.in.symTab is finalized as optimizeBasicBlockJumps
// can relax jump instructions based on symbol offset.
if (config->optimizeBBJumps)
optimizeBasicBlockJumps();
@@ -2731,7 +2732,7 @@ template <class ELFT> void Writer<ELFT>::writeHeader() {
eHdr->e_entry = getEntryAddr();
// If -z nosectionheader is specified, omit the section header table.
- if (!in.shStrTab)
+ if (!ctx.in.shStrTab)
return;
eHdr->e_shoff = sectionHeaderOff;
@@ -2751,7 +2752,7 @@ template <class ELFT> void Writer<ELFT>::writeHeader() {
else
eHdr->e_shnum = num;
- uint32_t strTabIndex = in.shStrTab->getParent()->sectionIndex;
+ uint32_t strTabIndex = ctx.in.shStrTab->getParent()->sectionIndex;
if (strTabIndex >= SHN_LORESERVE) {
sHdrs->sh_link = strTabIndex;
eHdr->e_shstrndx = SHN_XINDEX;
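(Throughout Writer.cpp, members of ctx.in that are only created under certain options -- for example .symtab when not fully stripped, or .shstrtab unless -z nosectionheader is given -- are guarded with a null check before use, as in the `if (!ctx.in.shStrTab) return;` path above. A minimal sketch of that optional-section idiom, again with invented toy names rather than LLD's API:

#include <iostream>
#include <memory>
#include <string>

// Toy model of an output table that may never be created.
struct StringTable {
  void addString(const std::string &s) { std::cout << "added " << s << '\n'; }
};

struct InStruct {
  std::unique_ptr<StringTable> shStrTab; // stays null when the option disables it
};

struct Ctx {
  InStruct in;
};

Ctx ctx;

int main(int argc, char **) {
  bool emitSectionHeader = (argc < 2); // pretend a command-line flag decides this
  if (emitSectionHeader)
    ctx.in.shStrTab = std::make_unique<StringTable>();

  // Later uses check for null, like the if (ctx.in.shStrTab) guards in the
  // diff; when the table was never created, the work is simply skipped.
  if (ctx.in.shStrTab)
    ctx.in.shStrTab->addString(".text");
}
)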