[lld] acb2b1e - [ELF] Pass Ctx & to Symbols

Fangrui Song via llvm-commits llvm-commits@lists.llvm.org
Sun Oct 6 16:59:10 PDT 2024


Author: Fangrui Song
Date: 2024-10-06T16:59:04-07:00
New Revision: acb2b1e7792f1ca2348752ee158882bb54b7d1e4

URL: https://github.com/llvm/llvm-project/commit/acb2b1e7792f1ca2348752ee158882bb54b7d1e4
DIFF: https://github.com/llvm/llvm-project/commit/acb2b1e7792f1ca2348752ee158882bb54b7d1e4.diff

LOG: [ELF] Pass Ctx & to Symbols

Added: 
    

Modified: 
    lld/ELF/ARMErrataFix.cpp
    lld/ELF/Arch/AArch64.cpp
    lld/ELF/Arch/ARM.cpp
    lld/ELF/Arch/Hexagon.cpp
    lld/ELF/Arch/LoongArch.cpp
    lld/ELF/Arch/Mips.cpp
    lld/ELF/Arch/PPC.cpp
    lld/ELF/Arch/PPC64.cpp
    lld/ELF/Arch/RISCV.cpp
    lld/ELF/Arch/SystemZ.cpp
    lld/ELF/Arch/X86.cpp
    lld/ELF/Arch/X86_64.cpp
    lld/ELF/InputSection.cpp
    lld/ELF/Relocations.cpp
    lld/ELF/Symbols.cpp
    lld/ELF/Symbols.h
    lld/ELF/SyntheticSections.cpp
    lld/ELF/Thunks.cpp

Removed: 
    


################################################################################
diff --git a/lld/ELF/ARMErrataFix.cpp b/lld/ELF/ARMErrataFix.cpp
index 36e7bc958ee56b..2fd191306353d4 100644
--- a/lld/ELF/ARMErrataFix.cpp
+++ b/lld/ELF/ARMErrataFix.cpp
@@ -215,7 +215,8 @@ static bool branchDestInFirstRegion(Ctx &ctx, const InputSection *isec,
   // find the destination address as the branch could be indirected via a thunk
   // or the PLT.
   if (r) {
-    uint64_t dst = (r->expr == R_PLT_PC) ? r->sym->getPltVA() : r->sym->getVA();
+    uint64_t dst =
+        (r->expr == R_PLT_PC) ? r->sym->getPltVA(ctx) : r->sym->getVA();
     // Account for Thumb PC bias, usually cancelled to 0 by addend of -4.
     destAddr = dst + r->addend + 4;
   } else {
@@ -443,8 +444,9 @@ static void implementPatch(ScanResult sr, InputSection *isec,
       // The final target of the branch may be ARM or Thumb, if the target
       // is ARM then we write the patch in ARM state to avoid a state change
       // Thunk from the patch to the target.
-      uint64_t dstSymAddr = (sr.rel->expr == R_PLT_PC) ? sr.rel->sym->getPltVA()
-                                                       : sr.rel->sym->getVA();
+      uint64_t dstSymAddr = (sr.rel->expr == R_PLT_PC)
+                                ? sr.rel->sym->getPltVA(ctx)
+                                : sr.rel->sym->getVA();
       destIsARM = (dstSymAddr & 1) == 0;
     }
     psec = make<Patch657417Section>(isec, sr.off, sr.instr, destIsARM);

diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index 785ebcb30c760c..8e133a444e3862 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -34,7 +34,7 @@ uint64_t elf::getAArch64Page(uint64_t expr) {
 // PACIBSP.
 bool elf::isAArch64BTILandingPad(Symbol &s, int64_t a) {
   // PLT entries accessed indirectly have a BTI c.
-  if (s.isInPlt())
+  if (s.isInPlt(ctx))
     return true;
   Defined *d = dyn_cast<Defined>(&s);
   if (!isa_and_nonnull<InputSection>(d->section))
@@ -394,7 +394,7 @@ void AArch64::writePlt(uint8_t *buf, const Symbol &sym,
   };
   memcpy(buf, inst, sizeof(inst));
 
-  uint64_t gotPltEntryAddr = sym.getGotPltVA();
+  uint64_t gotPltEntryAddr = sym.getGotPltVA(ctx);
   relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                 getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
   relocateNoSym(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
@@ -408,7 +408,7 @@ bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
   // be resolved as a branch to the next instruction. If it is hidden, its
   // binding has been converted to local, so we just check isUndefined() here. A
   // undefined non-weak symbol will have been errored.
-  if (s.isUndefined() && !s.isInPlt())
+  if (s.isUndefined() && !s.isInPlt(ctx))
     return false;
   // ELF for the ARM 64-bit architecture, section Call and Jump relocations
   // only permits range extension thunks for R_AARCH64_CALL26 and
@@ -416,7 +416,7 @@ bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
   if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&
       type != R_AARCH64_PLT32)
     return false;
-  uint64_t dst = expr == R_PLT_PC ? s.getPltVA() : s.getVA(a);
+  uint64_t dst = expr == R_PLT_PC ? s.getPltVA(ctx) : s.getVA(a);
   return !inBranchRange(type, branchAddr, dst);
 }
 
@@ -1089,7 +1089,7 @@ void AArch64BtiPac::writePlt(uint8_t *buf, const Symbol &sym,
     pltEntryAddr += sizeof(btiData);
   }
 
-  uint64_t gotPltEntryAddr = sym.getGotPltVA();
+  uint64_t gotPltEntryAddr = sym.getGotPltVA(ctx);
   memcpy(buf, addrInst, sizeof(addrInst));
   relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                 getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));

diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index 913d3894c3584c..a52f53b74e7826 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -321,7 +321,7 @@ static void writePltLong(uint8_t *buf, uint64_t gotPltEntryAddr,
 void ARM::writePlt(uint8_t *buf, const Symbol &sym,
                    uint64_t pltEntryAddr) const {
   if (!useThumbPLTs(ctx)) {
-    uint64_t offset = sym.getGotPltVA() - pltEntryAddr - 8;
+    uint64_t offset = sym.getGotPltVA(ctx) - pltEntryAddr - 8;
 
     // The PLT entry is similar to the example given in Appendix A of ELF for
     // the Arm Architecture. Instead of using the Group Relocations to find the
@@ -335,7 +335,7 @@ void ARM::writePlt(uint8_t *buf, const Symbol &sym,
     };
     if (!llvm::isUInt<27>(offset)) {
       // We cannot encode the Offset, use the long form.
-      writePltLong(buf, sym.getGotPltVA(), pltEntryAddr);
+      writePltLong(buf, sym.getGotPltVA(ctx), pltEntryAddr);
       return;
     }
     write32(buf + 0, pltData[0] | ((offset >> 20) & 0xff));
@@ -343,7 +343,7 @@ void ARM::writePlt(uint8_t *buf, const Symbol &sym,
     write32(buf + 8, pltData[2] | (offset & 0xfff));
     memcpy(buf + 12, trapInstr.data(), 4); // Pad to 16-byte boundary
   } else {
-    uint64_t offset = sym.getGotPltVA() - pltEntryAddr - 12;
+    uint64_t offset = sym.getGotPltVA(ctx) - pltEntryAddr - 12;
     assert(llvm::isUInt<32>(offset) && "This should always fit into a 32-bit offset");
 
     // A PLT entry will be:
@@ -387,7 +387,7 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
   // be resolved as a branch to the next instruction. If it is hidden, its
   // binding has been converted to local, so we just check isUndefined() here. A
   // undefined non-weak symbol will have been errored.
-  if (s.isUndefined() && !s.isInPlt())
+  if (s.isUndefined() && !s.isInPlt(ctx))
     return false;
   // A state change from ARM to Thumb and vice versa must go through an
   // interworking thunk if the relocation type is not R_ARM_CALL or
@@ -404,7 +404,7 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
       return true;
     [[fallthrough]];
   case R_ARM_CALL: {
-    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
+    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA();
     return !inBranchRange(type, branchAddr, dst + a) ||
         (!ctx.arg.armHasBlx && (s.getVA() & 1));
   }
@@ -417,7 +417,7 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
       return true;
     [[fallthrough]];
   case R_ARM_THM_CALL: {
-    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
+    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA();
     return !inBranchRange(type, branchAddr, dst + a) ||
         (!ctx.arg.armHasBlx && (s.getVA() & 1) == 0);;
   }
@@ -686,9 +686,9 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     bool isBlx = (read16(loc + 2) & 0x1000) == 0;
     // lld 10.0 and before always used bit0Thumb when deciding to write a BLX
     // even when type not STT_FUNC.
-    if (!rel.sym->isFunc() && !rel.sym->isInPlt() && isBlx == useThumb)
+    if (!rel.sym->isFunc() && !rel.sym->isInPlt(ctx) && isBlx == useThumb)
       stateChangeWarning(ctx, loc, rel.type, *rel.sym);
-    if ((rel.sym->isFunc() || rel.sym->isInPlt()) ? !useThumb : isBlx) {
+    if ((rel.sym->isFunc() || rel.sym->isInPlt(ctx)) ? !useThumb : isBlx) {
       // We are writing a BLX. Ensure BLX destination is 4-byte aligned. As
       // the BLX instruction may only be two byte aligned. This must be done
       // before overflow check.

diff --git a/lld/ELF/Arch/Hexagon.cpp b/lld/ELF/Arch/Hexagon.cpp
index d689fc2a152101..ab05640c342e75 100644
--- a/lld/ELF/Arch/Hexagon.cpp
+++ b/lld/ELF/Arch/Hexagon.cpp
@@ -374,7 +374,7 @@ void Hexagon::writePlt(uint8_t *buf, const Symbol &sym,
   };
   memcpy(buf, inst, sizeof(inst));
 
-  uint64_t gotPltEntryAddr = sym.getGotPltVA();
+  uint64_t gotPltEntryAddr = sym.getGotPltVA(ctx);
   relocateNoSym(buf, R_HEX_B32_PCREL_X, gotPltEntryAddr - pltEntryAddr);
   relocateNoSym(buf + 4, R_HEX_6_PCREL_X, gotPltEntryAddr - pltEntryAddr);
 }

diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index ebbd203a05368c..81f4131604e115 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -366,7 +366,7 @@ void LoongArch::writePlt(uint8_t *buf, const Symbol &sym,
  //   ld.[wd]   $t3, $t3, %pcrel_lo12(f@.got.plt)
   //   jirl      $t1, $t3, 0
   //   nop
-  uint32_t offset = sym.getGotPltVA() - pltEntryAddr;
+  uint32_t offset = sym.getGotPltVA(ctx) - pltEntryAddr;
   write32le(buf + 0, insn(PCADDU12I, R_T3, hi20(offset), 0));
   write32le(buf + 4,
             insn(ctx.arg.is64 ? LD_D : LD_W, R_T3, R_T3, lo12(offset)));

diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp
index 30d85b2678d50c..7a4ead338568e5 100644
--- a/lld/ELF/Arch/Mips.cpp
+++ b/lld/ELF/Arch/Mips.cpp
@@ -319,7 +319,7 @@ template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const {
 template <class ELFT>
 void MIPS<ELFT>::writePlt(uint8_t *buf, const Symbol &sym,
                           uint64_t pltEntryAddr) const {
-  uint64_t gotPltEntryAddr = sym.getGotPltVA();
+  uint64_t gotPltEntryAddr = sym.getGotPltVA(ctx);
   if (isMicroMips(ctx)) {
     // Overwrite trap instructions written by Writer::writeTrapInstr.
     memset(buf, 0, pltEntrySize);

diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp
index a4b19f6c4cdcc0..323b81dc721e44 100644
--- a/lld/ELF/Arch/PPC.cpp
+++ b/lld/ELF/Arch/PPC.cpp
@@ -79,7 +79,7 @@ void elf::writePPC32GlinkSection(Ctx &ctx, uint8_t *buf, size_t numEntries) {
   if (!ctx.arg.isPic) {
     for (const Symbol *sym :
          cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts) {
-      writePPC32PltCallStub(ctx, buf, sym->getGotPltVA(), nullptr, 0);
+      writePPC32PltCallStub(ctx, buf, sym->getGotPltVA(ctx), nullptr, 0);
       buf += 16;
       glink += 16;
     }
@@ -181,7 +181,7 @@ void PPC::writeIplt(uint8_t *buf, const Symbol &sym,
                     uint64_t /*pltEntryAddr*/) const {
   // In -pie or -shared mode, assume r30 points to .got2+0x8000, and use a
   // .got2.plt_pic32. thunk.
-  writePPC32PltCallStub(ctx, buf, sym.getGotPltVA(), sym.file, 0x8000);
+  writePPC32PltCallStub(ctx, buf, sym.getGotPltVA(ctx), sym.file, 0x8000);
 }
 
 void PPC::writeGotHeader(uint8_t *buf) const {
@@ -194,14 +194,14 @@ void PPC::writeGotHeader(uint8_t *buf) const {
 void PPC::writeGotPlt(uint8_t *buf, const Symbol &s) const {
   // Address of the symbol resolver stub in .glink .
   write32(buf,
-          ctx.in.plt->getVA() + ctx.in.plt->headerSize + 4 * s.getPltIdx());
+          ctx.in.plt->getVA() + ctx.in.plt->headerSize + 4 * s.getPltIdx(ctx));
 }
 
 bool PPC::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                      uint64_t branchAddr, const Symbol &s, int64_t a) const {
   if (type != R_PPC_LOCAL24PC && type != R_PPC_REL24 && type != R_PPC_PLTREL24)
     return false;
-  if (s.isInPlt())
+  if (s.isInPlt(ctx))
     return true;
   if (s.isUndefWeak())
     return false;

diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index 3373850c67cc64..63f3a32c128c37 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -1161,14 +1161,14 @@ void PPC64::writePltHeader(uint8_t *buf) const {
 
 void PPC64::writePlt(uint8_t *buf, const Symbol &sym,
                      uint64_t /*pltEntryAddr*/) const {
-  int32_t offset = pltHeaderSize + sym.getPltIdx() * pltEntrySize;
+  int32_t offset = pltHeaderSize + sym.getPltIdx(ctx) * pltEntrySize;
   // bl __glink_PLTresolve
   write32(buf, 0x48000000 | ((-offset) & 0x03FFFFFc));
 }
 
 void PPC64::writeIplt(uint8_t *buf, const Symbol &sym,
                       uint64_t /*pltEntryAddr*/) const {
-  writePPC64LoadAndBranch(buf, sym.getGotPltVA() - getPPC64TocBase(ctx));
+  writePPC64LoadAndBranch(buf, sym.getGotPltVA(ctx) - getPPC64TocBase(ctx));
 }
 
 static std::pair<RelType, uint64_t> toAddr16Rel(RelType type, uint64_t val) {
@@ -1429,7 +1429,7 @@ bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
     return false;
 
   // If a function is in the Plt it needs to be called with a call-stub.
-  if (s.isInPlt())
+  if (s.isInPlt(ctx))
     return true;
 
   // This check looks at the st_other bits of the callee with relocation

diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index b099a9a5effdc2..130952b74c43dc 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -247,7 +247,7 @@ void RISCV::writePlt(uint8_t *buf, const Symbol &sym,
   // l[wd] t3, %pcrel_lo(1b)(t3)
   // jalr t1, t3
   // nop
-  uint32_t offset = sym.getGotPltVA() - pltEntryAddr;
+  uint32_t offset = sym.getGotPltVA(ctx) - pltEntryAddr;
   write32le(buf + 0, utype(AUIPC, X_T3, hi20(offset)));
   write32le(buf + 4, itype(ctx.arg.is64 ? LD : LW, X_T3, X_T3, lo12(offset)));
   write32le(buf + 8, itype(JALR, X_T1, X_T3, 0));
@@ -737,7 +737,7 @@ static void relaxCall(Ctx &ctx, const InputSection &sec, size_t i, uint64_t loc,
   const uint64_t insnPair = read64le(sec.content().data() + r.offset);
   const uint32_t rd = extractBits(insnPair, 32 + 11, 32 + 7);
   const uint64_t dest =
-      (r.expr == R_PLT_PC ? sym.getPltVA() : sym.getVA()) + r.addend;
+      (r.expr == R_PLT_PC ? sym.getPltVA(ctx) : sym.getVA()) + r.addend;
   const int64_t displace = dest - loc;
 
   if (rvc && isInt<12>(displace) && rd == 0) {

diff --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp
index c87ab179651f1e..3757babb720517 100644
--- a/lld/ELF/Arch/SystemZ.cpp
+++ b/lld/ELF/Arch/SystemZ.cpp
@@ -183,7 +183,7 @@ void SystemZ::writeGotHeader(uint8_t *buf) const {
 }
 
 void SystemZ::writeGotPlt(uint8_t *buf, const Symbol &s) const {
-  write64be(buf, s.getPltVA() + 14);
+  write64be(buf, s.getPltVA(ctx) + 14);
 }
 
 void SystemZ::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
@@ -227,9 +227,9 @@ void SystemZ::writePlt(uint8_t *buf, const Symbol &sym,
   };
   memcpy(buf, inst, sizeof(inst));
 
-  write32be(buf + 2, (sym.getGotPltVA() - pltEntryAddr) >> 1);
+  write32be(buf + 2, (sym.getGotPltVA(ctx) - pltEntryAddr) >> 1);
   write32be(buf + 24, (ctx.in.plt->getVA() - pltEntryAddr - 22) >> 1);
-  write32be(buf + 28, ctx.in.relaPlt->entsize * sym.getPltIdx());
+  write32be(buf + 28, ctx.in.relaPlt->entsize * sym.getPltIdx(ctx));
 }
 
 int64_t SystemZ::getImplicitAddend(const uint8_t *buf, RelType type) const {
@@ -451,7 +451,7 @@ bool SystemZ::relaxOnce(int pass) const {
         if (isInt<33>(v) && !(v & 1))
           continue;
         if (rel.sym->auxIdx == 0) {
-          rel.sym->allocateAux();
+          rel.sym->allocateAux(ctx);
           addGotEntry(ctx, *rel.sym);
           changed = true;
         }

diff --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp
index 0343260840a4a9..f0b8a5a71db2b4 100644
--- a/lld/ELF/Arch/X86.cpp
+++ b/lld/ELF/Arch/X86.cpp
@@ -170,7 +170,7 @@ void X86::writeGotPltHeader(uint8_t *buf) const {
 void X86::writeGotPlt(uint8_t *buf, const Symbol &s) const {
   // Entries in .got.plt initially points back to the corresponding
   // PLT entries with a fixed offset to skip the first instruction.
-  write32le(buf, s.getPltVA() + 6);
+  write32le(buf, s.getPltVA(ctx) + 6);
 }
 
 void X86::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
@@ -210,7 +210,7 @@ void X86::writePltHeader(uint8_t *buf) const {
 
 void X86::writePlt(uint8_t *buf, const Symbol &sym,
                    uint64_t pltEntryAddr) const {
-  unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx();
+  unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx(ctx);
   if (ctx.arg.isPic) {
     const uint8_t inst[] = {
        0xff, 0xa3, 0, 0, 0, 0, // jmp *foo@GOT(%ebx)
@@ -218,7 +218,7 @@ void X86::writePlt(uint8_t *buf, const Symbol &sym,
        0xe9, 0,    0, 0, 0,    // jmp .PLT0@PC
     };
     memcpy(buf, inst, sizeof(inst));
-    write32le(buf + 2, sym.getGotPltVA() - ctx.in.gotPlt->getVA());
+    write32le(buf + 2, sym.getGotPltVA(ctx) - ctx.in.gotPlt->getVA());
   } else {
     const uint8_t inst[] = {
        0xff, 0x25, 0, 0, 0, 0, // jmp *foo@GOT
@@ -226,7 +226,7 @@ void X86::writePlt(uint8_t *buf, const Symbol &sym,
        0xe9, 0,    0, 0, 0,    // jmp .PLT0@PC
     };
     memcpy(buf, inst, sizeof(inst));
-    write32le(buf + 2, sym.getGotPltVA());
+    write32le(buf + 2, sym.getGotPltVA(ctx));
   }
 
   write32le(buf + 7, relOff);
@@ -527,8 +527,8 @@ class IntelIBT : public X86 {
 } // namespace
 
 void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {
-  uint64_t va =
-      ctx.in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize;
+  uint64_t va = ctx.in.ibtPlt->getVA() + IBTPltHeaderSize +
+                s.getPltIdx(ctx) * pltEntrySize;
   write32le(buf, va);
 }
 
@@ -541,7 +541,7 @@ void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym,
         0x66, 0x0f, 0x1f, 0x44, 0, 0, // nop
     };
     memcpy(buf, inst, sizeof(inst));
-    write32le(buf + 6, sym.getGotPltVA() - ctx.in.gotPlt->getVA());
+    write32le(buf + 6, sym.getGotPltVA(ctx) - ctx.in.gotPlt->getVA());
     return;
   }
 
@@ -551,7 +551,7 @@ void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym,
       0x66, 0x0f, 0x1f, 0x44, 0, 0, // nop
   };
   memcpy(buf, inst, sizeof(inst));
-  write32le(buf + 6, sym.getGotPltVA());
+  write32le(buf + 6, sym.getGotPltVA(ctx));
 }
 
 void IntelIBT::writeIBTPlt(uint8_t *buf, size_t numEntries) const {
@@ -600,7 +600,7 @@ RetpolinePic::RetpolinePic(Ctx &ctx) : X86(ctx) {
 }
 
 void RetpolinePic::writeGotPlt(uint8_t *buf, const Symbol &s) const {
-  write32le(buf, s.getPltVA() + 17);
+  write32le(buf, s.getPltVA(ctx) + 17);
 }
 
 void RetpolinePic::writePltHeader(uint8_t *buf) const {
@@ -626,7 +626,7 @@ void RetpolinePic::writePltHeader(uint8_t *buf) const {
 
 void RetpolinePic::writePlt(uint8_t *buf, const Symbol &sym,
                             uint64_t pltEntryAddr) const {
-  unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx();
+  unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx(ctx);
   const uint8_t insn[] = {
       0x50,                            // pushl %eax
       0x8b, 0x83, 0,    0,    0,    0, // mov foo at GOT(%ebx), %eax
@@ -640,7 +640,7 @@ void RetpolinePic::writePlt(uint8_t *buf, const Symbol &sym,
 
   uint32_t ebx = ctx.in.gotPlt->getVA();
   unsigned off = pltEntryAddr - ctx.in.plt->getVA();
-  write32le(buf + 3, sym.getGotPltVA() - ebx);
+  write32le(buf + 3, sym.getGotPltVA(ctx) - ebx);
   write32le(buf + 8, -off - 12 + 32);
   write32le(buf + 13, -off - 17 + 18);
   write32le(buf + 18, relOff);
@@ -654,7 +654,7 @@ RetpolineNoPic::RetpolineNoPic(Ctx &ctx) : X86(ctx) {
 }
 
 void RetpolineNoPic::writeGotPlt(uint8_t *buf, const Symbol &s) const {
-  write32le(buf, s.getPltVA() + 16);
+  write32le(buf, s.getPltVA(ctx) + 16);
 }
 
 void RetpolineNoPic::writePltHeader(uint8_t *buf) const {
@@ -685,7 +685,7 @@ void RetpolineNoPic::writePltHeader(uint8_t *buf) const {
 
 void RetpolineNoPic::writePlt(uint8_t *buf, const Symbol &sym,
                               uint64_t pltEntryAddr) const {
-  unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx();
+  unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx(ctx);
   const uint8_t insn[] = {
       0x50,                         // 0:  pushl %eax
       0xa1, 0,    0,    0,    0,    // 1:  mov foo_in_GOT, %eax
@@ -699,7 +699,7 @@ void RetpolineNoPic::writePlt(uint8_t *buf, const Symbol &sym,
   memcpy(buf, insn, sizeof(insn));
 
   unsigned off = pltEntryAddr - ctx.in.plt->getVA();
-  write32le(buf + 2, sym.getGotPltVA());
+  write32le(buf + 2, sym.getGotPltVA(ctx));
   write32le(buf + 7, -off - 11 + 32);
   write32le(buf + 12, -off - 16 + 17);
   write32le(buf + 17, relOff);

diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp
index 4c70d704496801..b31bc14b888200 100644
--- a/lld/ELF/Arch/X86_64.cpp
+++ b/lld/ELF/Arch/X86_64.cpp
@@ -338,7 +338,7 @@ bool X86_64::relaxOnce(int pass) const {
         if (isInt<32>(v))
           continue;
         if (rel.sym->auxIdx == 0) {
-          rel.sym->allocateAux();
+          rel.sym->allocateAux(ctx);
           addGotEntry(ctx, *rel.sym);
           changed = true;
         }
@@ -417,7 +417,7 @@ void X86_64::writeGotPltHeader(uint8_t *buf) const {
 
 void X86_64::writeGotPlt(uint8_t *buf, const Symbol &s) const {
   // See comments in X86::writeGotPlt.
-  write64le(buf, s.getPltVA() + 6);
+  write64le(buf, s.getPltVA(ctx) + 6);
 }
 
 void X86_64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
@@ -448,8 +448,8 @@ void X86_64::writePlt(uint8_t *buf, const Symbol &sym,
   };
   memcpy(buf, inst, sizeof(inst));
 
-  write32le(buf + 2, sym.getGotPltVA() - pltEntryAddr - 6);
-  write32le(buf + 7, sym.getPltIdx());
+  write32le(buf + 2, sym.getGotPltVA(ctx) - pltEntryAddr - 6);
+  write32le(buf + 7, sym.getPltIdx(ctx));
   write32le(buf + 12, ctx.in.plt->getVA() - pltEntryAddr - 16);
 }
 
@@ -1085,8 +1085,8 @@ class IntelIBT : public X86_64 {
 } // namespace
 
 void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {
-  uint64_t va =
-      ctx.in.ibtPlt->getVA() + IBTPltHeaderSize + s.getPltIdx() * pltEntrySize;
+  uint64_t va = ctx.in.ibtPlt->getVA() + IBTPltHeaderSize +
+                s.getPltIdx(ctx) * pltEntrySize;
   write64le(buf, va);
 }
 
@@ -1098,7 +1098,7 @@ void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym,
       0x66, 0x0f, 0x1f, 0x44, 0, 0, // nop
   };
   memcpy(buf, Inst, sizeof(Inst));
-  write32le(buf + 6, sym.getGotPltVA() - pltEntryAddr - 10);
+  write32le(buf + 6, sym.getGotPltVA(ctx) - pltEntryAddr - 10);
 }
 
 void IntelIBT::writeIBTPlt(uint8_t *buf, size_t numEntries) const {
@@ -1156,7 +1156,7 @@ Retpoline::Retpoline(Ctx &ctx) : X86_64(ctx) {
 }
 
 void Retpoline::writeGotPlt(uint8_t *buf, const Symbol &s) const {
-  write64le(buf, s.getPltVA() + 17);
+  write64le(buf, s.getPltVA(ctx) + 17);
 }
 
 void Retpoline::writePltHeader(uint8_t *buf) const {
@@ -1195,10 +1195,10 @@ void Retpoline::writePlt(uint8_t *buf, const Symbol &sym,
 
   uint64_t off = pltEntryAddr - ctx.in.plt->getVA();
 
-  write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7);
+  write32le(buf + 3, sym.getGotPltVA(ctx) - pltEntryAddr - 7);
   write32le(buf + 8, -off - 12 + 32);
   write32le(buf + 13, -off - 17 + 18);
-  write32le(buf + 18, sym.getPltIdx());
+  write32le(buf + 18, sym.getPltIdx(ctx));
   write32le(buf + 23, -off - 27);
 }
 
@@ -1233,7 +1233,7 @@ void RetpolineZNow::writePlt(uint8_t *buf, const Symbol &sym,
   };
   memcpy(buf, insn, sizeof(insn));
 
-  write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7);
+  write32le(buf + 3, sym.getGotPltVA(ctx) - pltEntryAddr - 7);
   write32le(buf + 8, ctx.in.plt->getVA() - pltEntryAddr - 12);
 }
 

diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index 4328c085426a7c..bdbcdffaf7ce64 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -737,7 +737,7 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
     return r.sym->getVA(a) - getARMStaticBase(*r.sym);
   case R_GOT:
   case R_RELAX_TLS_GD_TO_IE_ABS:
-    return r.sym->getGotVA() + a;
+    return r.sym->getGotVA(ctx) + a;
   case R_LOONGARCH_GOT:
     // The LoongArch TLS GD relocs reuse the R_LARCH_GOT_PC_LO12 reloc r.type
    // for their page offsets. The arithmetics are different in the TLS case
@@ -745,7 +745,7 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
     if (r.sym->hasFlag(NEEDS_TLSGD) && r.type != R_LARCH_TLS_IE_PC_LO12)
       // Like R_LOONGARCH_TLSGD_PAGE_PC but taking the absolute value.
       return ctx.in.got->getGlobalDynAddr(*r.sym) + a;
-    return r.sym->getGotVA() + a;
+    return r.sym->getGotVA(ctx) + a;
   case R_GOTONLY_PC:
     return ctx.in.got->getVA() + a - p;
   case R_GOTPLTONLY_PC:
@@ -757,28 +757,28 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
     return r.sym->getVA(a) - ctx.in.gotPlt->getVA();
   case R_GOTPLT:
   case R_RELAX_TLS_GD_TO_IE_GOTPLT:
-    return r.sym->getGotVA() + a - ctx.in.gotPlt->getVA();
+    return r.sym->getGotVA(ctx) + a - ctx.in.gotPlt->getVA();
   case R_TLSLD_GOT_OFF:
   case R_GOT_OFF:
   case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
-    return r.sym->getGotOffset() + a;
+    return r.sym->getGotOffset(ctx) + a;
   case R_AARCH64_GOT_PAGE_PC:
   case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
-    return getAArch64Page(r.sym->getGotVA() + a) - getAArch64Page(p);
+    return getAArch64Page(r.sym->getGotVA(ctx) + a) - getAArch64Page(p);
   case R_AARCH64_GOT_PAGE:
-    return r.sym->getGotVA() + a - getAArch64Page(ctx.in.got->getVA());
+    return r.sym->getGotVA(ctx) + a - getAArch64Page(ctx.in.got->getVA());
   case R_GOT_PC:
   case R_RELAX_TLS_GD_TO_IE:
-    return r.sym->getGotVA() + a - p;
+    return r.sym->getGotVA(ctx) + a - p;
   case R_GOTPLT_GOTREL:
-    return r.sym->getGotPltVA() + a - ctx.in.got->getVA();
+    return r.sym->getGotPltVA(ctx) + a - ctx.in.got->getVA();
   case R_GOTPLT_PC:
-    return r.sym->getGotPltVA() + a - p;
+    return r.sym->getGotPltVA(ctx) + a - p;
   case R_LOONGARCH_GOT_PAGE_PC:
     if (r.sym->hasFlag(NEEDS_TLSGD))
       return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(*r.sym) + a, p,
                                    r.type);
-    return getLoongArchPageDelta(r.sym->getGotVA() + a, p, r.type);
+    return getLoongArchPageDelta(r.sym->getGotVA(ctx) + a, p, r.type);
   case R_MIPS_GOTREL:
     return r.sym->getVA(a) - ctx.in.mipsGot->getGp(file);
   case R_MIPS_GOT_GP:
@@ -860,21 +860,21 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
     return dest - p;
   }
   case R_PLT:
-    return r.sym->getPltVA() + a;
+    return r.sym->getPltVA(ctx) + a;
   case R_PLT_PC:
   case R_PPC64_CALL_PLT:
-    return r.sym->getPltVA() + a - p;
+    return r.sym->getPltVA(ctx) + a - p;
   case R_LOONGARCH_PLT_PAGE_PC:
-    return getLoongArchPageDelta(r.sym->getPltVA() + a, p, r.type);
+    return getLoongArchPageDelta(r.sym->getPltVA(ctx) + a, p, r.type);
   case R_PLT_GOTPLT:
-    return r.sym->getPltVA() + a - ctx.in.gotPlt->getVA();
+    return r.sym->getPltVA(ctx) + a - ctx.in.gotPlt->getVA();
   case R_PLT_GOTREL:
-    return r.sym->getPltVA() + a - ctx.in.got->getVA();
+    return r.sym->getPltVA(ctx) + a - ctx.in.got->getVA();
   case R_PPC32_PLTREL:
     // R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
     // stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
     // target VA computation.
-    return r.sym->getPltVA() - p;
+    return r.sym->getPltVA(ctx) - p;
   case R_PPC64_CALL: {
     uint64_t symVA = r.sym->getVA(a);
     // If we have an undefined weak symbol, we might get here with a symbol

diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index a70d26fb418d6e..ba2d493c28213f 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -921,7 +921,7 @@ static void addPltEntry(PltSection &plt, GotPltSection &gotPlt,
                         RelocationBaseSection &rel, RelType type, Symbol &sym) {
   plt.addEntry(sym);
   gotPlt.addEntry(sym);
-  rel.addReloc({type, &gotPlt, sym.getGotPltOffset(),
+  rel.addReloc({type, &gotPlt, sym.getGotPltOffset(ctx),
                 sym.isPreemptible ? DynamicReloc::AgainstSymbol
                                   : DynamicReloc::AddendOnlyWithTargetVA,
                 sym, 0, R_ABS});
@@ -929,7 +929,7 @@ static void addPltEntry(PltSection &plt, GotPltSection &gotPlt,
 
 void elf::addGotEntry(Ctx &ctx, Symbol &sym) {
   ctx.in.got->addEntry(sym);
-  uint64_t off = sym.getGotOffset();
+  uint64_t off = sym.getGotOffset(ctx);
 
   // If preemptible, emit a GLOB_DAT relocation.
   if (sym.isPreemptible) {
@@ -950,7 +950,7 @@ void elf::addGotEntry(Ctx &ctx, Symbol &sym) {
 
 static void addTpOffsetGotEntry(Ctx &ctx, Symbol &sym) {
   ctx.in.got->addEntry(sym);
-  uint64_t off = sym.getGotOffset();
+  uint64_t off = sym.getGotOffset(ctx);
   if (!sym.isPreemptible && !ctx.arg.shared) {
     ctx.in.got->addConstant({R_TPREL, ctx.target->symbolicRel, off, 0, &sym});
     return;
@@ -1753,19 +1753,19 @@ static bool handleNonPreemptibleIfunc(Ctx &ctx, Symbol &sym, uint16_t flags) {
   // --pack-relative-relocs=android+relr is enabled. Work around this by placing
   // IRELATIVE in .rela.plt.
   auto *directSym = makeDefined(cast<Defined>(sym));
-  directSym->allocateAux();
+  directSym->allocateAux(ctx);
   auto &dyn =
       ctx.arg.androidPackDynRelocs ? *ctx.in.relaPlt : *ctx.mainPart->relaDyn;
   addPltEntry(*ctx.in.iplt, *ctx.in.igotPlt, dyn, ctx.target->iRelativeRel,
               *directSym);
-  sym.allocateAux();
+  sym.allocateAux(ctx);
   ctx.symAux.back().pltIdx = ctx.symAux[directSym->auxIdx].pltIdx;
 
   if (flags & HAS_DIRECT_RELOC) {
     // Change the value to the IPLT and redirect all references to it.
     auto &d = cast<Defined>(sym);
     d.section = ctx.in.iplt.get();
-    d.value = d.getPltIdx() * ctx.target->ipltEntrySize;
+    d.value = d.getPltIdx(ctx) * ctx.target->ipltEntrySize;
     d.size = 0;
     // It's important to set the symbol type here so that dynamic loaders
     // don't try to call the PLT as if it were an ifunc resolver.
@@ -1791,7 +1791,7 @@ void elf::postScanRelocations(Ctx &ctx) {
 
     if (!sym.needsDynReloc())
       return;
-    sym.allocateAux();
+    sym.allocateAux(ctx);
 
     if (flags & NEEDS_GOT)
       addGotEntry(ctx, sym);
@@ -1809,7 +1809,7 @@ void elf::postScanRelocations(Ctx &ctx) {
         if (!sym.isDefined()) {
           replaceWithDefined(sym, *ctx.in.plt,
                              ctx.target->pltHeaderSize +
-                                 ctx.target->pltEntrySize * sym.getPltIdx(),
+                                 ctx.target->pltEntrySize * sym.getPltIdx(ctx),
                              0);
           sym.setFlags(NEEDS_COPY);
           if (ctx.arg.emachine == EM_PPC) {
@@ -1855,12 +1855,12 @@ void elf::postScanRelocations(Ctx &ctx) {
     if (flags & NEEDS_TLSGD_TO_IE) {
       got->addEntry(sym);
       ctx.mainPart->relaDyn->addSymbolReloc(ctx.target->tlsGotRel, *got,
-                                            sym.getGotOffset(), sym);
+                                            sym.getGotOffset(ctx), sym);
     }
     if (flags & NEEDS_GOT_DTPREL) {
       got->addEntry(sym);
       got->addConstant(
-          {R_ABS, ctx.target->tlsOffsetRel, sym.getGotOffset(), 0, &sym});
+          {R_ABS, ctx.target->tlsOffsetRel, sym.getGotOffset(ctx), 0, &sym});
     }
 
     if ((flags & NEEDS_TLSIE) && !(flags & NEEDS_TLSGD_TO_IE))
@@ -2247,7 +2247,7 @@ std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
   // offset + addend) pair. We may revert the relocation back to its original
   // non-Thunk target, so we cannot fold offset + addend.
   if (auto *d = dyn_cast<Defined>(rel.sym))
-    if (!d->isInPlt() && d->section)
+    if (!d->isInPlt(ctx) && d->section)
       thunkVec = &thunkedSymbolsBySectionAndAddend[{{d->section, d->value},
                                                     keyAddend}];
   if (!thunkVec)
@@ -2286,7 +2286,7 @@ bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
       return true;
     rel.sym = &t->destination;
     rel.addend = t->addend;
-    if (rel.sym->isInPlt())
+    if (rel.sym->isInPlt(ctx))
       rel.expr = toPlt(rel.expr);
   }
   return false;
@@ -2427,7 +2427,7 @@ void elf::hexagonTLSSymbolUpdate(Ctx &ctx) {
           for (Relocation &rel : isec->relocs())
             if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
               if (needEntry) {
-                sym->allocateAux();
+                sym->allocateAux(ctx);
                 addPltEntry(*ctx.in.plt, *ctx.in.gotPlt, *ctx.in.relaPlt,
                             ctx.target->pltRel, *sym);
                 needEntry = false;

diff  --git a/lld/ELF/Symbols.cpp b/lld/ELF/Symbols.cpp
index 0b21c95720a4c8..580317c601973d 100644
--- a/lld/ELF/Symbols.cpp
+++ b/lld/ELF/Symbols.cpp
@@ -145,34 +145,34 @@ uint64_t Symbol::getVA(int64_t addend) const {
   return getSymVA(*this, addend) + addend;
 }
 
-uint64_t Symbol::getGotVA() const {
+uint64_t Symbol::getGotVA(Ctx &ctx) const {
   if (gotInIgot)
-    return ctx.in.igotPlt->getVA() + getGotPltOffset();
-  return ctx.in.got->getVA() + getGotOffset();
+    return ctx.in.igotPlt->getVA() + getGotPltOffset(ctx);
+  return ctx.in.got->getVA() + getGotOffset(ctx);
 }
 
-uint64_t Symbol::getGotOffset() const {
-  return getGotIdx() * ctx.target->gotEntrySize;
+uint64_t Symbol::getGotOffset(Ctx &ctx) const {
+  return getGotIdx(ctx) * ctx.target->gotEntrySize;
 }
 
-uint64_t Symbol::getGotPltVA() const {
+uint64_t Symbol::getGotPltVA(Ctx &ctx) const {
   if (isInIplt)
-    return ctx.in.igotPlt->getVA() + getGotPltOffset();
-  return ctx.in.gotPlt->getVA() + getGotPltOffset();
+    return ctx.in.igotPlt->getVA() + getGotPltOffset(ctx);
+  return ctx.in.gotPlt->getVA() + getGotPltOffset(ctx);
 }
 
-uint64_t Symbol::getGotPltOffset() const {
+uint64_t Symbol::getGotPltOffset(Ctx &ctx) const {
   if (isInIplt)
-    return getPltIdx() * ctx.target->gotEntrySize;
-  return (getPltIdx() + ctx.target->gotPltHeaderEntriesNum) *
+    return getPltIdx(ctx) * ctx.target->gotEntrySize;
+  return (getPltIdx(ctx) + ctx.target->gotPltHeaderEntriesNum) *
          ctx.target->gotEntrySize;
 }
 
-uint64_t Symbol::getPltVA() const {
-  uint64_t outVA =
-      isInIplt ? ctx.in.iplt->getVA() + getPltIdx() * ctx.target->ipltEntrySize
-               : ctx.in.plt->getVA() + ctx.in.plt->headerSize +
-                     getPltIdx() * ctx.target->pltEntrySize;
+uint64_t Symbol::getPltVA(Ctx &ctx) const {
+  uint64_t outVA = isInIplt ? ctx.in.iplt->getVA() +
+                                  getPltIdx(ctx) * ctx.target->ipltEntrySize
+                            : ctx.in.plt->getVA() + ctx.in.plt->headerSize +
+                                  getPltIdx(ctx) * ctx.target->pltEntrySize;
 
   // While linking microMIPS code PLT code are always microMIPS
   // code. Set the less-significant bit to track that fact.

diff  --git a/lld/ELF/Symbols.h b/lld/ELF/Symbols.h
index 57aa423f28cae5..f11136fb04c11e 100644
--- a/lld/ELF/Symbols.h
+++ b/lld/ELF/Symbols.h
@@ -200,21 +200,23 @@ class Symbol {
   // truncated by Symbol::parseSymbolVersion().
   const char *getVersionSuffix() const { return nameData + nameSize; }
 
-  uint32_t getGotIdx() const { return ctx.symAux[auxIdx].gotIdx; }
-  uint32_t getPltIdx() const { return ctx.symAux[auxIdx].pltIdx; }
-  uint32_t getTlsDescIdx() const { return ctx.symAux[auxIdx].tlsDescIdx; }
-  uint32_t getTlsGdIdx() const { return ctx.symAux[auxIdx].tlsGdIdx; }
+  uint32_t getGotIdx(Ctx &ctx) const { return ctx.symAux[auxIdx].gotIdx; }
+  uint32_t getPltIdx(Ctx &ctx) const { return ctx.symAux[auxIdx].pltIdx; }
+  uint32_t getTlsDescIdx(Ctx &ctx) const {
+    return ctx.symAux[auxIdx].tlsDescIdx;
+  }
+  uint32_t getTlsGdIdx(Ctx &ctx) const { return ctx.symAux[auxIdx].tlsGdIdx; }
 
-  bool isInGot() const { return getGotIdx() != uint32_t(-1); }
-  bool isInPlt() const { return getPltIdx() != uint32_t(-1); }
+  bool isInGot(Ctx &ctx) const { return getGotIdx(ctx) != uint32_t(-1); }
+  bool isInPlt(Ctx &ctx) const { return getPltIdx(ctx) != uint32_t(-1); }
 
   uint64_t getVA(int64_t addend = 0) const;
 
-  uint64_t getGotOffset() const;
-  uint64_t getGotVA() const;
-  uint64_t getGotPltOffset() const;
-  uint64_t getGotPltVA() const;
-  uint64_t getPltVA() const;
+  uint64_t getGotOffset(Ctx &) const;
+  uint64_t getGotVA(Ctx &) const;
+  uint64_t getGotPltOffset(Ctx &) const;
+  uint64_t getGotPltVA(Ctx &) const;
+  uint64_t getPltVA(Ctx &) const;
   uint64_t getSize() const;
   OutputSection *getOutputSection() const;
 
@@ -344,7 +346,7 @@ class Symbol {
            (NEEDS_COPY | NEEDS_GOT | NEEDS_PLT | NEEDS_TLSDESC | NEEDS_TLSGD |
             NEEDS_TLSGD_TO_IE | NEEDS_GOT_DTPREL | NEEDS_TLSIE);
   }
-  void allocateAux() {
+  void allocateAux(Ctx &ctx) {
     assert(auxIdx == 0);
     auxIdx = ctx.symAux.size();
     ctx.symAux.emplace_back();

diff  --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index 600a3c3389c4d8..5704129019a1b5 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -688,7 +688,7 @@ bool GotSection::addTlsIndex() {
 }
 
 uint32_t GotSection::getTlsDescOffset(const Symbol &sym) const {
-  return sym.getTlsDescIdx() * ctx.arg.wordsize;
+  return sym.getTlsDescIdx(ctx) * ctx.arg.wordsize;
 }
 
 uint64_t GotSection::getTlsDescAddr(const Symbol &sym) const {
@@ -696,11 +696,11 @@ uint64_t GotSection::getTlsDescAddr(const Symbol &sym) const {
 }
 
 uint64_t GotSection::getGlobalDynAddr(const Symbol &b) const {
-  return this->getVA() + b.getTlsGdIdx() * ctx.arg.wordsize;
+  return this->getVA() + b.getTlsGdIdx(ctx) * ctx.arg.wordsize;
 }
 
 uint64_t GotSection::getGlobalDynOffset(const Symbol &b) const {
-  return b.getTlsGdIdx() * ctx.arg.wordsize;
+  return b.getTlsGdIdx(ctx) * ctx.arg.wordsize;
 }
 
 void GotSection::finalizeContents(Ctx &) {
@@ -1002,12 +1002,12 @@ void MipsGotSection::build() {
   // value later in the `sortMipsSymbols` function.
   for (auto &p : primGot->global) {
     if (p.first->auxIdx == 0)
-      p.first->allocateAux();
+      p.first->allocateAux(ctx);
     ctx.symAux.back().gotIdx = p.second;
   }
   for (auto &p : primGot->relocs) {
     if (p.first->auxIdx == 0)
-      p.first->allocateAux();
+      p.first->allocateAux(ctx);
     ctx.symAux.back().gotIdx = p.second;
   }
 
@@ -2125,11 +2125,11 @@ static bool sortMipsSymbols(const SymbolTableEntry &l,
                             const SymbolTableEntry &r) {
   // Sort entries related to non-local preemptible symbols by GOT indexes.
   // All other entries go to the beginning of a dynsym in arbitrary order.
-  if (l.sym->isInGot() && r.sym->isInGot())
-    return l.sym->getGotIdx() < r.sym->getGotIdx();
-  if (!l.sym->isInGot() && !r.sym->isInGot())
+  if (l.sym->isInGot(ctx) && r.sym->isInGot(ctx))
+    return l.sym->getGotIdx(ctx) < r.sym->getGotIdx(ctx);
+  if (!l.sym->isInGot(ctx) && !r.sym->isInGot(ctx))
     return false;
-  return !l.sym->isInGot();
+  return !l.sym->isInGot(ctx);
 }
 
 void SymbolTableBaseSection::finalizeContents(Ctx &) {
@@ -2300,7 +2300,7 @@ void SymbolTableSection<ELFT>::writeTo(Ctx &ctx, uint8_t *buf) {
 
     for (SymbolTableEntry &ent : symbols) {
       Symbol *sym = ent.sym;
-      if (sym->isInPlt() && sym->hasFlag(NEEDS_COPY))
+      if (sym->isInPlt(ctx) && sym->hasFlag(NEEDS_COPY))
         eSym->st_other |= STO_MIPS_PLT;
       if (isMicroMips(ctx)) {
         // We already set the less-significant bit for symbols

diff  --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp
index 629ef994773229..68dc5ceeab2670 100644
--- a/lld/ELF/Thunks.cpp
+++ b/lld/ELF/Thunks.cpp
@@ -550,7 +550,7 @@ void Thunk::setOffset(uint64_t newOffset) {
 
 // AArch64 Thunk base class.
 static uint64_t getAArch64ThunkDestVA(const Symbol &s, int64_t a) {
-  uint64_t v = s.isInPlt() ? s.getPltVA() : s.getVA(a);
+  uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA(a);
   return v;
 }
 
@@ -677,7 +677,7 @@ void AArch64BTILandingPadThunk::writeLong(uint8_t *buf) {
 
 // ARM Target Thunks
 static uint64_t getARMThunkDestVA(const Symbol &s) {
-  uint64_t v = s.isInPlt() ? s.getPltVA() : s.getVA();
+  uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA();
   return SignExtend64<32>(v);
 }
 
@@ -1171,7 +1171,7 @@ void elf::writePPC32PltCallStub(Ctx &ctx, uint8_t *buf, uint64_t gotPltVA,
 }
 
 void PPC32PltCallStub::writeTo(uint8_t *buf) {
-  writePPC32PltCallStub(ctx, buf, destination.getGotPltVA(), file, addend);
+  writePPC32PltCallStub(ctx, buf, destination.getGotPltVA(ctx), file, addend);
 }
 
 void PPC32PltCallStub::addSymbols(ThunkSection &isec) {
@@ -1231,7 +1231,7 @@ void elf::writePPC64LoadAndBranch(uint8_t *buf, int64_t offset) {
 }
 
 void PPC64PltCallStub::writeTo(uint8_t *buf) {
-  int64_t offset = destination.getGotPltVA() - getPPC64TocBase(ctx);
+  int64_t offset = destination.getGotPltVA(ctx) - getPPC64TocBase(ctx);
   // Save the TOC pointer to the save-slot reserved in the call frame.
   write32(buf + 0, 0xf8410018); // std     r2,24(r1)
   writePPC64LoadAndBranch(buf + 4, offset);
@@ -1293,8 +1293,9 @@ bool PPC64R2SaveStub::isCompatibleWith(const InputSection &isec,
 }
 
 void PPC64R12SetupStub::writeTo(uint8_t *buf) {
-  int64_t offset = (gotPlt ? destination.getGotPltVA() : destination.getVA()) -
-                   getThunkTargetSym()->getVA();
+  int64_t offset =
+      (gotPlt ? destination.getGotPltVA(ctx) : destination.getVA()) -
+      getThunkTargetSym()->getVA();
   if (!isInt<34>(offset))
     reportRangeError(ctx, buf, offset, 34, destination,
                      "R12 setup stub offset");
@@ -1526,7 +1527,7 @@ static Thunk *addThunkPPC32(Ctx &ctx, const InputSection &isec,
   assert((rel.type == R_PPC_LOCAL24PC || rel.type == R_PPC_REL24 ||
           rel.type == R_PPC_PLTREL24) &&
          "unexpected relocation type for thunk");
-  if (s.isInPlt())
+  if (s.isInPlt(ctx))
     return make<PPC32PltCallStub>(ctx, isec, rel, s);
   return make<PPC32LongThunk>(ctx, s, rel.addend);
 }
@@ -1541,7 +1542,7 @@ static Thunk *addThunkPPC64(Ctx &ctx, RelType type, Symbol &s, int64_t a) {
   if (type == R_PPC64_REL24_NOTOC)
     ctx.target->ppc64DynamicSectionOpt = 0x2;
 
-  if (s.isInPlt())
+  if (s.isInPlt(ctx))
     return type == R_PPC64_REL24_NOTOC
                ? (Thunk *)make<PPC64R12SetupStub>(ctx, s, /*gotPlt=*/true)
                : (Thunk *)make<PPC64PltCallStub>(ctx, s);


        


More information about the llvm-commits mailing list