[lld] 861bd36 - [ELF] Pass Ctx & to Symbol::getVA

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Sat Oct 19 20:33:03 PDT 2024


Author: Fangrui Song
Date: 2024-10-19T20:32:58-07:00
New Revision: 861bd36bce3c3e1384b87b0366cf83e2c022c325

URL: https://github.com/llvm/llvm-project/commit/861bd36bce3c3e1384b87b0366cf83e2c022c325
DIFF: https://github.com/llvm/llvm-project/commit/861bd36bce3c3e1384b87b0366cf83e2c022c325.diff

LOG: [ELF] Pass Ctx & to Symbol::getVA

Added: 
    

Modified: 
    lld/ELF/AArch64ErrataFix.cpp
    lld/ELF/ARMErrataFix.cpp
    lld/ELF/Arch/AArch64.cpp
    lld/ELF/Arch/ARM.cpp
    lld/ELF/Arch/AVR.cpp
    lld/ELF/Arch/LoongArch.cpp
    lld/ELF/Arch/Mips.cpp
    lld/ELF/Arch/PPC.cpp
    lld/ELF/Arch/PPC64.cpp
    lld/ELF/Arch/RISCV.cpp
    lld/ELF/Arch/SystemZ.cpp
    lld/ELF/Arch/X86.cpp
    lld/ELF/Arch/X86_64.cpp
    lld/ELF/InputSection.cpp
    lld/ELF/MapFile.cpp
    lld/ELF/OutputSections.cpp
    lld/ELF/Relocations.cpp
    lld/ELF/Symbols.cpp
    lld/ELF/Symbols.h
    lld/ELF/SyntheticSections.cpp
    lld/ELF/Thunks.cpp
    lld/ELF/Writer.cpp

Removed: 
    


################################################################################
diff  --git a/lld/ELF/AArch64ErrataFix.cpp b/lld/ELF/AArch64ErrataFix.cpp
index cd8fbf16f5b839..f9e03ce5bbe4db 100644
--- a/lld/ELF/AArch64ErrataFix.cpp
+++ b/lld/ELF/AArch64ErrataFix.cpp
@@ -417,7 +417,7 @@ void Patch843419Section::writeTo(uint8_t *buf) {
 
   // Return address is the next instruction after the one we have just copied.
   uint64_t s = getLDSTAddr() + 4;
-  uint64_t p = patchSym->getVA() + 4;
+  uint64_t p = patchSym->getVA(ctx) + 4;
   ctx.target->relocateNoSym(buf + 4, R_AARCH64_JUMP26, s - p);
 }
 

diff  --git a/lld/ELF/ARMErrataFix.cpp b/lld/ELF/ARMErrataFix.cpp
index 630084afd509ce..6d759d7dec1d8a 100644
--- a/lld/ELF/ARMErrataFix.cpp
+++ b/lld/ELF/ARMErrataFix.cpp
@@ -218,7 +218,7 @@ static bool branchDestInFirstRegion(Ctx &ctx, const InputSection *isec,
   // or the PLT.
   if (r) {
     uint64_t dst =
-        (r->expr == R_PLT_PC) ? r->sym->getPltVA(ctx) : r->sym->getVA();
+        r->expr == R_PLT_PC ? r->sym->getPltVA(ctx) : r->sym->getVA(ctx);
     // Account for Thumb PC bias, usually cancelled to 0 by addend of -4.
     destAddr = dst + r->addend + 4;
   } else {
@@ -449,7 +449,7 @@ static void implementPatch(ScanResult sr, InputSection *isec,
       // Thunk from the patch to the target.
       uint64_t dstSymAddr = (sr.rel->expr == R_PLT_PC)
                                 ? sr.rel->sym->getPltVA(ctx)
-                                : sr.rel->sym->getVA();
+                                : sr.rel->sym->getVA(ctx);
       destIsARM = (dstSymAddr & 1) == 0;
     }
     psec = make<Patch657417Section>(ctx, isec, sr.off, sr.instr, destIsARM);

diff  --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index 260307ac4c3dcb..f4f867d019136e 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -360,7 +360,7 @@ void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
 
 void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
   if (ctx.arg.writeAddends)
-    write64(ctx, buf, s.getVA());
+    write64(ctx, buf, s.getVA(ctx));
 }
 
 void AArch64::writePltHeader(uint8_t *buf) const {
@@ -416,7 +416,7 @@ bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
   if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 &&
       type != R_AARCH64_PLT32)
     return false;
-  uint64_t dst = expr == R_PLT_PC ? s.getPltVA(ctx) : s.getVA(a);
+  uint64_t dst = expr == R_PLT_PC ? s.getPltVA(ctx) : s.getVA(ctx, a);
   return !inBranchRange(type, branchAddr, dst);
 }
 
@@ -808,7 +808,7 @@ bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel,
 
   Symbol &sym = *adrpRel.sym;
   // Check if the address difference is within 1MiB range.
-  int64_t val = sym.getVA() - (secAddr + addRel.offset);
+  int64_t val = sym.getVA(ctx) - (secAddr + addRel.offset);
   if (val < -1024 * 1024 || val >= 1024 * 1024)
     return false;
 
@@ -874,7 +874,7 @@ bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
     return false;
   // Check if the address difference is within 4GB range.
   int64_t val =
-      getAArch64Page(sym.getVA()) - getAArch64Page(secAddr + adrpRel.offset);
+      getAArch64Page(sym.getVA(ctx)) - getAArch64Page(secAddr + adrpRel.offset);
   if (val != llvm::SignExtend64(val, 33))
     return false;
 
@@ -890,11 +890,11 @@ bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
 
   ctx.target->relocate(
       buf + adrpSymRel.offset, adrpSymRel,
-      SignExtend64(getAArch64Page(sym.getVA()) -
+      SignExtend64(getAArch64Page(sym.getVA(ctx)) -
                        getAArch64Page(secAddr + adrpSymRel.offset),
                    64));
   ctx.target->relocate(buf + addRel.offset, addRel,
-                       SignExtend64(sym.getVA(), 64));
+                       SignExtend64(sym.getVA(ctx), 64));
   tryRelaxAdrpAdd(adrpSymRel, addRel, secAddr, buf);
   return true;
 }

diff  --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index 1cc396aa395d3b..be3f80337aae71 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -213,7 +213,7 @@ void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const {
 
 void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
   // An ARM entry is the address of the ifunc resolver function.
-  write32(ctx, buf, s.getVA());
+  write32(ctx, buf, s.getVA(ctx));
 }
 
 // Long form PLT Header that does not have any restrictions on the displacement
@@ -404,26 +404,26 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
     // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 set (Thumb).
     assert(!useThumbPLTs(ctx) &&
            "If the source is ARM, we should not need Thumb PLTs");
-    if (s.isFunc() && expr == R_PC && (s.getVA() & 1))
+    if (s.isFunc() && expr == R_PC && (s.getVA(ctx) & 1))
       return true;
     [[fallthrough]];
   case R_ARM_CALL: {
-    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA();
+    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA(ctx);
     return !inBranchRange(type, branchAddr, dst + a) ||
-        (!ctx.arg.armHasBlx && (s.getVA() & 1));
+           (!ctx.arg.armHasBlx && (s.getVA(ctx) & 1));
   }
   case R_ARM_THM_JUMP19:
   case R_ARM_THM_JUMP24:
     // Source is Thumb, when all PLT entries are ARM interworking is required.
     // Otherwise we need to interwork if STT_FUNC Symbol has bit 0 clear (ARM).
     if ((expr == R_PLT_PC && !useThumbPLTs(ctx)) ||
-        (s.isFunc() && (s.getVA() & 1) == 0))
+        (s.isFunc() && (s.getVA(ctx) & 1) == 0))
       return true;
     [[fallthrough]];
   case R_ARM_THM_CALL: {
-    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA();
+    uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA(ctx) : s.getVA(ctx);
     return !inBranchRange(type, branchAddr, dst + a) ||
-        (!ctx.arg.armHasBlx && (s.getVA() & 1) == 0);;
+           (!ctx.arg.armHasBlx && (s.getVA(ctx) & 1) == 0);
   }
   }
   return false;
@@ -1399,7 +1399,7 @@ void ArmCmseSGSection::writeTo(uint8_t *buf) {
     write16(ctx, p + 4, 0xf000); // B.W S
     write16(ctx, p + 6, 0xb000);
     ctx.target->relocateNoSym(p + 4, R_ARM_THM_JUMP24,
-                              s->acleSeSym->getVA() -
+                              s->acleSeSym->getVA(ctx) -
                                   (getVA() + s->offset + s->size));
   }
 }
@@ -1466,16 +1466,15 @@ template <typename ELFT> void elf::writeARMCmseImportLib(Ctx &ctx) {
   osIsPairs.emplace_back(make<OutputSection>(ctx, shstrtab->name, 0, 0),
                          shstrtab);
 
-  std::sort(ctx.symtab->cmseSymMap.begin(), ctx.symtab->cmseSymMap.end(),
-            [](const auto &a, const auto &b) -> bool {
-              return a.second.sym->getVA() < b.second.sym->getVA();
-            });
+  llvm::sort(ctx.symtab->cmseSymMap, [&](const auto &a, const auto &b) {
+    return a.second.sym->getVA(ctx) < b.second.sym->getVA(ctx);
+  });
   // Copy the secure gateway entry symbols to the import library symbol table.
   for (auto &p : ctx.symtab->cmseSymMap) {
     Defined *d = cast<Defined>(p.second.sym);
     impSymTab->addSymbol(makeDefined(
         ctx, ctx.internalFile, d->getName(), d->computeBinding(ctx),
-        /*stOther=*/0, STT_FUNC, d->getVA(), d->getSize(), nullptr));
+        /*stOther=*/0, STT_FUNC, d->getVA(ctx), d->getSize(), nullptr));
   }
 
   size_t idx = 0;

diff  --git a/lld/ELF/Arch/AVR.cpp b/lld/ELF/Arch/AVR.cpp
index 4dc605c47059c1..64790f1ce83ab3 100644
--- a/lld/ELF/Arch/AVR.cpp
+++ b/lld/ELF/Arch/AVR.cpp
@@ -110,7 +110,7 @@ bool AVR::needsThunk(RelExpr expr, RelType type, const InputFile *file,
   case R_AVR_HI8_LDI_GS:
     // A thunk is needed if the symbol's virtual address is out of range
     // [0, 0x1ffff].
-    return s.getVA() >= 0x20000;
+    return s.getVA(ctx) >= 0x20000;
   default:
     return false;
   }

diff  --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index 5923cda2298b4e..876aadcb91511b 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -316,9 +316,9 @@ void LoongArch::writeGotPlt(uint8_t *buf, const Symbol &s) const {
 void LoongArch::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
   if (ctx.arg.writeAddends) {
     if (ctx.arg.is64)
-      write64le(buf, s.getVA());
+      write64le(buf, s.getVA(ctx));
     else
-      write32le(buf, s.getVA());
+      write32le(buf, s.getVA(ctx));
   }
 }
 

diff  --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp
index 1d3000793ca268..d84e85239d2ec2 100644
--- a/lld/ELF/Arch/Mips.cpp
+++ b/lld/ELF/Arch/Mips.cpp
@@ -96,7 +96,7 @@ RelExpr MIPS<ELFT>::getRelExpr(RelType type, const Symbol &s,
     // If the target symbol is not preemptible and is not microMIPS,
     // it might be possible to replace jalr/jr instruction by bal/b.
     // It depends on the target symbol's offset.
-    if (!s.isPreemptible && !(s.getVA() & 0x1))
+    if (!s.isPreemptible && !(s.getVA(ctx) & 0x1))
       return R_PC;
     return R_NONE;
   case R_MICROMIPS_JALR:

diff  --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp
index 3af4101fff606f..2cd526020f7d35 100644
--- a/lld/ELF/Arch/PPC.cpp
+++ b/lld/ELF/Arch/PPC.cpp
@@ -209,7 +209,7 @@ bool PPC::needsThunk(RelExpr expr, RelType type, const InputFile *file,
     return true;
   if (s.isUndefWeak())
     return false;
-  return !PPC::inBranchRange(type, branchAddr, s.getVA(a));
+  return !PPC::inBranchRange(type, branchAddr, s.getVA(ctx, a));
 }
 
 uint32_t PPC::getThunkSectionSpacing() const { return 0x2000000; }

diff  --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index 9f550745f93b2a..d0f59681ccbd3c 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -404,7 +404,7 @@ static bool tryRelaxPPC64TocIndirection(Ctx &ctx, const Relocation &rel,
   assert(!d->isGnuIFunc());
 
   // Two instructions can materialize a 32-bit signed offset from the toc base.
-  uint64_t tocRelative = d->getVA(addend) - getPPC64TocBase(ctx);
+  uint64_t tocRelative = d->getVA(ctx, addend) - getPPC64TocBase(ctx);
   if (!isInt<32>(tocRelative))
     return false;
 
@@ -1452,7 +1452,7 @@ bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
   // a range-extending thunk.
   // See the comment in getRelocTargetVA() about R_PPC64_CALL.
   return !inBranchRange(type, branchAddr,
-                        s.getVA(a) +
+                        s.getVA(ctx, a) +
                             getPPC64GlobalEntryToLocalEntryOffset(s.stOther));
 }
 

diff  --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index 1ae016e4de01ee..7ebb67c3612311 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -214,9 +214,9 @@ void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const {
 void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
   if (ctx.arg.writeAddends) {
     if (ctx.arg.is64)
-      write64le(buf, s.getVA());
+      write64le(buf, s.getVA(ctx));
     else
-      write32le(buf, s.getVA());
+      write32le(buf, s.getVA(ctx));
   }
 }
 
@@ -466,7 +466,7 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case INTERNAL_R_RISCV_GPREL_I:
   case INTERNAL_R_RISCV_GPREL_S: {
     Defined *gp = ctx.sym.riscvGlobalPointer;
-    int64_t displace = SignExtend64(val - gp->getVA(), bits);
+    int64_t displace = SignExtend64(val - gp->getVA(ctx), bits);
     checkInt(ctx, loc, displace, 12, rel);
     uint32_t insn = (read32le(loc) & ~(31 << 15)) | (X_GP << 15);
     if (rel.type == INTERNAL_R_RISCV_GPREL_I)
@@ -657,7 +657,8 @@ void RISCV::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
         const Relocation &rel1 = relocs[i + 1];
         if (rel.type == R_RISCV_SET_ULEB128 &&
             rel1.type == R_RISCV_SUB_ULEB128 && rel.offset == rel1.offset) {
-          auto val = rel.sym->getVA(rel.addend) - rel1.sym->getVA(rel1.addend);
+          auto val = rel.sym->getVA(ctx, rel.addend) -
+                     rel1.sym->getVA(ctx, rel1.addend);
           if (overwriteULEB128(loc, val) >= 0x80)
             errorOrWarn(sec.getLocation(rel.offset) + ": ULEB128 value " +
                         Twine(val) + " exceeds available space; references '" +
@@ -737,7 +738,7 @@ static void relaxCall(Ctx &ctx, const InputSection &sec, size_t i, uint64_t loc,
   const uint64_t insnPair = read64le(sec.content().data() + r.offset);
   const uint32_t rd = extractBits(insnPair, 32 + 11, 32 + 7);
   const uint64_t dest =
-      (r.expr == R_PLT_PC ? sym.getPltVA(ctx) : sym.getVA()) + r.addend;
+      (r.expr == R_PLT_PC ? sym.getPltVA(ctx) : sym.getVA(ctx)) + r.addend;
   const int64_t displace = dest - loc;
 
   if (rvc && isInt<12>(displace) && rd == 0) {
@@ -759,7 +760,7 @@ static void relaxCall(Ctx &ctx, const InputSection &sec, size_t i, uint64_t loc,
 // Relax local-exec TLS when hi20 is zero.
 static void relaxTlsLe(const InputSection &sec, size_t i, uint64_t loc,
                        Relocation &r, uint32_t &remove) {
-  uint64_t val = r.sym->getVA(r.addend);
+  uint64_t val = r.sym->getVA(ctx, r.addend);
   if (hi20(val) != 0)
     return;
   uint32_t insn = read32le(sec.content().data() + r.offset);
@@ -791,7 +792,7 @@ static void relaxHi20Lo12(Ctx &ctx, const InputSection &sec, size_t i,
   if (!gp)
     return;
 
-  if (!isInt<12>(r.sym->getVA(r.addend) - gp->getVA()))
+  if (!isInt<12>(r.sym->getVA(ctx, r.addend) - gp->getVA(ctx)))
     return;
 
   switch (r.type) {
@@ -863,7 +864,7 @@ static bool relax(Ctx &ctx, InputSection &sec) {
       // For TLSDESC=>LE, we can use the short form if hi20 is zero.
       tlsdescRelax = relaxable(relocs, i);
       toLeShortForm = tlsdescRelax && r.expr == R_RELAX_TLS_GD_TO_LE &&
-                      !hi20(r.sym->getVA(r.addend));
+                      !hi20(r.sym->getVA(ctx, r.addend));
       [[fallthrough]];
     case R_RISCV_TLSDESC_LOAD_LO12:
       // For TLSDESC=>LE/IE, AUIPC and L[DW] are removed if relaxable.

diff  --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp
index 584379638ad981..106b530c31b28b 100644
--- a/lld/ELF/Arch/SystemZ.cpp
+++ b/lld/ELF/Arch/SystemZ.cpp
@@ -188,7 +188,7 @@ void SystemZ::writeGotPlt(uint8_t *buf, const Symbol &s) const {
 
 void SystemZ::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
   if (ctx.arg.writeAddends)
-    write64be(buf, s.getVA());
+    write64be(buf, s.getVA(ctx));
 }
 
 void SystemZ::writePltHeader(uint8_t *buf) const {

diff  --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp
index 58199cdb99a284..a36212a5b1690a 100644
--- a/lld/ELF/Arch/X86.cpp
+++ b/lld/ELF/Arch/X86.cpp
@@ -181,7 +181,7 @@ void X86::writeGotPlt(uint8_t *buf, const Symbol &s) const {
 
 void X86::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
   // An x86 entry is the address of the ifunc resolver function.
-  write32le(buf, s.getVA());
+  write32le(buf, s.getVA(ctx));
 }
 
 RelType X86::getDynRel(RelType type) const {

diff  --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp
index df2983f2022818..d32ba638b740c5 100644
--- a/lld/ELF/Arch/X86_64.cpp
+++ b/lld/ELF/Arch/X86_64.cpp
@@ -429,7 +429,7 @@ void X86_64::writeGotPlt(uint8_t *buf, const Symbol &s) const {
 void X86_64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
   // An x86 entry is the address of the ifunc resolver function (for -z rel).
   if (ctx.arg.writeAddends)
-    write64le(buf, s.getVA());
+    write64le(buf, s.getVA(ctx));
 }
 
 void X86_64::writePltHeader(uint8_t *buf) const {

diff  --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index 2e9e8a7007bbf8..6c34471a9e5022 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -508,7 +508,8 @@ void InputSection::copyRelocations(Ctx &ctx, uint8_t *buf,
       }
 
       if (RelTy::HasAddend)
-        p->r_addend = sym.getVA(addend) - section->getOutputSection()->addr;
+        p->r_addend =
+            sym.getVA(ctx, addend) - section->getOutputSection()->addr;
       // For SHF_ALLOC sections relocated by REL, append a relocation to
       // sec->relocations so that relocateAlloc transitively called by
       // writeSections will update the implicit addend. Non-SHF_ALLOC sections
@@ -701,7 +702,7 @@ static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) {
     // Variant 1.
   case EM_ARM:
   case EM_AARCH64:
-    return s.getVA(0) + ctx.arg.wordsize * 2 +
+    return s.getVA(ctx, 0) + ctx.arg.wordsize * 2 +
            ((tls->p_vaddr - ctx.arg.wordsize * 2) & (tls->p_align - 1));
   case EM_MIPS:
   case EM_PPC:
@@ -709,7 +710,7 @@ static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) {
     // Adjusted Variant 1. TP is placed with a displacement of 0x7000, which is
     // to allow a signed 16-bit offset to reach 0x1000 of TCB/thread-library
     // data and 0xf000 of the program's TLS segment.
-    return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000;
+    return s.getVA(ctx, 0) + (tls->p_vaddr & (tls->p_align - 1)) - 0x7000;
   case EM_LOONGARCH:
   case EM_RISCV:
     // See the comment in handleTlsRelocation. For TLSDESC=>IE,
@@ -717,7 +718,7 @@ static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) {
     // `tls` may be null, the return value is ignored.
     if (s.type != STT_TLS)
       return 0;
-    return s.getVA(0) + (tls->p_vaddr & (tls->p_align - 1));
+    return s.getVA(ctx, 0) + (tls->p_vaddr & (tls->p_align - 1));
 
     // Variant 2.
   case EM_HEXAGON:
@@ -725,7 +726,7 @@ static int64_t getTlsTpOffset(Ctx &ctx, const Symbol &s) {
   case EM_SPARCV9:
   case EM_386:
   case EM_X86_64:
-    return s.getVA(0) - tls->p_memsz -
+    return s.getVA(ctx, 0) - tls->p_memsz -
            ((-tls->p_vaddr - tls->p_memsz) & (tls->p_align - 1));
   default:
     llvm_unreachable("unhandled ctx.arg.emachine");
@@ -743,13 +744,13 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
   case R_AARCH64_AUTH:
   case R_RISCV_ADD:
   case R_RISCV_LEB128:
-    return r.sym->getVA(a);
+    return r.sym->getVA(ctx, a);
   case R_ADDEND:
     return a;
   case R_RELAX_HINT:
     return 0;
   case R_ARM_SBREL:
-    return r.sym->getVA(a) - getARMStaticBase(*r.sym);
+    return r.sym->getVA(ctx, a) - getARMStaticBase(*r.sym);
   case R_GOT:
   case R_RELAX_TLS_GD_TO_IE_ABS:
     return r.sym->getGotVA(ctx) + a;
@@ -767,9 +768,9 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
     return ctx.in.gotPlt->getVA() + a - p;
   case R_GOTREL:
   case R_PPC64_RELAX_TOC:
-    return r.sym->getVA(a) - ctx.in.got->getVA();
+    return r.sym->getVA(ctx, a) - ctx.in.got->getVA();
   case R_GOTPLTREL:
-    return r.sym->getVA(a) - ctx.in.gotPlt->getVA();
+    return r.sym->getVA(ctx, a) - ctx.in.gotPlt->getVA();
   case R_GOTPLT:
   case R_RELAX_TLS_GD_TO_IE_GOTPLT:
     return r.sym->getGotVA(ctx) + a - ctx.in.gotPlt->getVA();
@@ -795,7 +796,7 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
                                    r.type);
     return getLoongArchPageDelta(r.sym->getGotVA(ctx) + a, p, r.type);
   case R_MIPS_GOTREL:
-    return r.sym->getVA(a) - ctx.in.mipsGot->getGp(file);
+    return r.sym->getVA(ctx, a) - ctx.in.mipsGot->getGp(file);
   case R_MIPS_GOT_GP:
     return ctx.in.mipsGot->getGp(file) + a;
   case R_MIPS_GOT_GP_PC: {
@@ -836,16 +837,16 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
     return ctx.in.mipsGot->getVA() + ctx.in.mipsGot->getTlsIndexOffset(file) -
            ctx.in.mipsGot->getGp(file);
   case R_AARCH64_PAGE_PC: {
-    uint64_t val = r.sym->isUndefWeak() ? p + a : r.sym->getVA(a);
+    uint64_t val = r.sym->isUndefWeak() ? p + a : r.sym->getVA(ctx, a);
     return getAArch64Page(val) - getAArch64Page(p);
   }
   case R_RISCV_PC_INDIRECT: {
     if (const Relocation *hiRel = getRISCVPCRelHi20(this, r))
-      return getRelocTargetVA(ctx, *hiRel, r.sym->getVA());
+      return getRelocTargetVA(ctx, *hiRel, r.sym->getVA(ctx));
     return 0;
   }
   case R_LOONGARCH_PAGE_PC:
-    return getLoongArchPageDelta(r.sym->getVA(a), p, r.type);
+    return getLoongArchPageDelta(r.sym->getVA(ctx, a), p, r.type);
   case R_PC:
   case R_ARM_PCA: {
     uint64_t dest;
@@ -868,9 +869,9 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
       else if (ctx.arg.emachine == EM_RISCV)
         dest = getRISCVUndefinedRelativeWeakVA(r.type, p) + a;
       else
-        dest = r.sym->getVA(a);
+        dest = r.sym->getVA(ctx, a);
     } else {
-      dest = r.sym->getVA(a);
+      dest = r.sym->getVA(ctx, a);
     }
     return dest - p;
   }
@@ -891,7 +892,7 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
     // target VA computation.
     return r.sym->getPltVA(ctx) - p;
   case R_PPC64_CALL: {
-    uint64_t symVA = r.sym->getVA(a);
+    uint64_t symVA = r.sym->getVA(ctx, a);
     // If we have an undefined weak symbol, we might get here with a symbol
     // address of zero. That could overflow, but the code must be unreachable,
     // so don't bother doing anything at all.
@@ -910,7 +911,7 @@ uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
     return getPPC64TocBase(ctx) + a;
   case R_RELAX_GOT_PC:
   case R_PPC64_RELAX_GOT_PC:
-    return r.sym->getVA(a) - p;
+    return r.sym->getVA(ctx, a) - p;
   case R_RELAX_TLS_GD_TO_LE:
   case R_RELAX_TLS_IE_TO_LE:
   case R_RELAX_TLS_LD_TO_LE:
@@ -1016,8 +1017,8 @@ void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf,
         if (!ds && tombstone) {
           val = *tombstone;
         } else {
-          val = sym.getVA(addend) -
-                (f->getRelocTargetSym(*it).getVA(0) + getAddend<ELFT>(*it));
+          val = sym.getVA(ctx, addend) -
+                (f->getRelocTargetSym(*it).getVA(ctx) + getAddend<ELFT>(*it));
         }
         if (overwriteULEB128(bufLoc, val) >= 0x80)
           errorOrWarn(getLocation(offset) + ": ULEB128 value " + Twine(val) +
@@ -1083,7 +1084,8 @@ void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf,
     // sections.
     if (LLVM_LIKELY(expr == R_ABS) || expr == R_DTPREL || expr == R_GOTPLTREL ||
         expr == R_RISCV_ADD) {
-      target.relocateNoSym(bufLoc, type, SignExtend64<bits>(sym.getVA(addend)));
+      target.relocateNoSym(bufLoc, type,
+                           SignExtend64<bits>(sym.getVA(ctx, addend)));
       continue;
     }
 
@@ -1116,7 +1118,7 @@ void InputSection::relocateNonAlloc(Ctx &ctx, uint8_t *buf,
     warn(msg);
     target.relocateNoSym(
         bufLoc, type,
-        SignExtend64<bits>(sym.getVA(addend - offset - outSecOff)));
+        SignExtend64<bits>(sym.getVA(ctx, addend - offset - outSecOff)));
   }
 }
 

diff  --git a/lld/ELF/MapFile.cpp b/lld/ELF/MapFile.cpp
index 6bbc1ecc646fdd..84bc95f9bd00ce 100644
--- a/lld/ELF/MapFile.cpp
+++ b/lld/ELF/MapFile.cpp
@@ -68,7 +68,7 @@ static std::vector<Defined *> getSymbols(Ctx &ctx) {
 static SymbolMapTy getSectionSyms(ArrayRef<Defined *> syms) {
   SymbolMapTy ret;
   for (Defined *dr : syms)
-    ret[dr->section].emplace_back(dr, dr->getVA());
+    ret[dr->section].emplace_back(dr, dr->getVA(ctx));
 
   // Sort symbols by address. We want to print out symbols in the
   // order in the output file rather than the order they appeared
@@ -95,7 +95,7 @@ getSymbolStrings(Ctx &ctx, ArrayRef<Defined *> syms) {
   parallelFor(0, syms.size(), [&](size_t i) {
     raw_string_ostream os(strs[i]);
     OutputSection *osec = syms[i]->getOutputSection();
-    uint64_t vma = syms[i]->getVA();
+    uint64_t vma = syms[i]->getVA(ctx);
     uint64_t lma = osec ? osec->getLMA() + vma - osec->getVA(0) : 0;
     writeHeader(ctx, os, vma, lma, syms[i]->getSize(), 1);
     os << indent16 << toString(*syms[i]);

diff  --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp
index 309039fe7e204a..6f76c5d73a5388 100644
--- a/lld/ELF/OutputSections.cpp
+++ b/lld/ELF/OutputSections.cpp
@@ -624,7 +624,7 @@ encodeOneCrel(Ctx &ctx, raw_svector_ostream &os,
     if (d) {
       SectionBase *section = d->section;
       assert(section->isLive());
-      addend = sym.getVA(addend) - section->getOutputSection()->addr;
+      addend = sym.getVA(ctx, addend) - section->getOutputSection()->addr;
     } else {
       // Encode R_*_NONE(symidx=0).
       symidx = type = addend = 0;

diff  --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index c8dcc276c30a66..d40348a7b30d8f 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -2257,7 +2257,7 @@ std::pair<Thunk *, bool> ThunkCreator::getThunk(InputSection *isec,
     if (isThunkSectionCompatible(isec, t->getThunkTargetSym()->section) &&
         t->isCompatibleWith(*isec, rel) &&
         ctx.target->inBranchRange(rel.type, src,
-                                  t->getThunkTargetSym()->getVA(-pcBias)))
+                                  t->getThunkTargetSym()->getVA(ctx, -pcBias)))
       return std::make_pair(t, false);
 
   // No existing compatible Thunk in range, create a new one
@@ -2281,7 +2281,8 @@ std::pair<Thunk *, bool> ThunkCreator::getSyntheticLandingPad(Defined &d,
 // relocation back to its original non-Thunk target.
 bool ThunkCreator::normalizeExistingThunk(Relocation &rel, uint64_t src) {
   if (Thunk *t = thunks.lookup(rel.sym)) {
-    if (ctx.target->inBranchRange(rel.type, src, rel.sym->getVA(rel.addend)))
+    if (ctx.target->inBranchRange(rel.type, src,
+                                  rel.sym->getVA(ctx, rel.addend)))
       return true;
     rel.sym = &t->destination;
     rel.addend = t->addend;

diff  --git a/lld/ELF/Symbols.cpp b/lld/ELF/Symbols.cpp
index 3caa609338e068..6d9b3c839f86ae 100644
--- a/lld/ELF/Symbols.cpp
+++ b/lld/ELF/Symbols.cpp
@@ -58,7 +58,7 @@ std::string lld::toString(const elf::Symbol &sym) {
   return ret;
 }
 
-static uint64_t getSymVA(const Symbol &sym, int64_t addend) {
+static uint64_t getSymVA(Ctx &ctx, const Symbol &sym, int64_t addend) {
   switch (sym.kind()) {
   case Symbol::DefinedKind: {
     auto &d = cast<Defined>(sym);
@@ -141,8 +141,8 @@ static uint64_t getSymVA(const Symbol &sym, int64_t addend) {
   llvm_unreachable("invalid symbol kind");
 }
 
-uint64_t Symbol::getVA(int64_t addend) const {
-  return getSymVA(*this, addend) + addend;
+uint64_t Symbol::getVA(Ctx &ctx, int64_t addend) const {
+  return getSymVA(ctx, *this, addend) + addend;
 }
 
 uint64_t Symbol::getGotVA(Ctx &ctx) const {

diff  --git a/lld/ELF/Symbols.h b/lld/ELF/Symbols.h
index 86abebe79f8db6..339f32e05f1625 100644
--- a/lld/ELF/Symbols.h
+++ b/lld/ELF/Symbols.h
@@ -210,7 +210,7 @@ class Symbol {
   bool isInGot(Ctx &ctx) const { return getGotIdx(ctx) != uint32_t(-1); }
   bool isInPlt(Ctx &ctx) const { return getPltIdx(ctx) != uint32_t(-1); }
 
-  uint64_t getVA(int64_t addend = 0) const;
+  uint64_t getVA(Ctx &, int64_t addend = 0) const;
 
   uint64_t getGotOffset(Ctx &) const;
   uint64_t getGotVA(Ctx &) const;

diff  --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index f50404ed3016f4..7a344635a1cb53 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -749,7 +749,7 @@ void MipsGotSection::addEntry(InputFile &file, Symbol &sym, int64_t addend,
     if (const OutputSection *os = sym.getOutputSection())
       g.pagesMap.insert({os, {}});
     else
-      g.local16.insert({{nullptr, getMipsPageAddr(sym.getVA(addend))}, 0});
+      g.local16.insert({{nullptr, getMipsPageAddr(sym.getVA(ctx, addend))}, 0});
   } else if (sym.isTls())
     g.tls.insert({&sym, 0});
   else if (sym.isPreemptible && expr == R_ABS)
@@ -808,10 +808,11 @@ uint64_t MipsGotSection::getPageEntryOffset(const InputFile *f,
   uint64_t index = 0;
   if (const OutputSection *outSec = sym.getOutputSection()) {
     uint64_t secAddr = getMipsPageAddr(outSec->addr);
-    uint64_t symAddr = getMipsPageAddr(sym.getVA(addend));
+    uint64_t symAddr = getMipsPageAddr(sym.getVA(ctx, addend));
     index = g.pagesMap.lookup(outSec).firstIndex + (symAddr - secAddr) / 0xffff;
   } else {
-    index = g.local16.lookup({nullptr, getMipsPageAddr(sym.getVA(addend))});
+    index =
+        g.local16.lookup({nullptr, getMipsPageAddr(sym.getVA(ctx, addend))});
   }
   return index * ctx.arg.wordsize;
 }
@@ -1099,7 +1100,7 @@ uint64_t MipsGotSection::getGp(const InputFile *f) const {
   // returns "common" _gp value. For secondary GOTs calculate
   // individual _gp values.
   if (!f || f->mipsGotIndex == uint32_t(-1) || f->mipsGotIndex == 0)
-    return ctx.sym.mipsGp->getVA(0);
+    return ctx.sym.mipsGp->getVA(ctx, 0);
   return getVA() + gots[f->mipsGotIndex].startIndex * ctx.arg.wordsize + 0x7ff0;
 }
 
@@ -1124,7 +1125,7 @@ void MipsGotSection::writeTo(uint8_t *buf) {
     auto write = [&](size_t i, const Symbol *s, int64_t a) {
       uint64_t va = a;
       if (s)
-        va = s->getVA(a);
+        va = s->getVA(ctx, a);
       writeUint(ctx, buf + i * ctx.arg.wordsize, va);
     };
     // Write 'page address' entries to the local part of the GOT.
@@ -1522,10 +1523,10 @@ DynamicSection<ELFT>::computeContents() {
 
     if (Symbol *b = ctx.symtab->find(ctx.arg.init))
       if (b->isDefined())
-        addInt(DT_INIT, b->getVA());
+        addInt(DT_INIT, b->getVA(ctx));
     if (Symbol *b = ctx.symtab->find(ctx.arg.fini))
       if (b->isDefined())
-        addInt(DT_FINI, b->getVA());
+        addInt(DT_FINI, b->getVA(ctx));
   }
 
   if (part.verSym && part.verSym->isNeeded())
@@ -2288,7 +2289,7 @@ template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *buf) {
       const uint32_t shndx = getSymSectionIndex(sym);
       if (isDefinedHere) {
         eSym->st_shndx = shndx;
-        eSym->st_value = sym->getVA();
+        eSym->st_value = sym->getVA(ctx);
         // Copy symbol size if it is a defined symbol. st_size is not
         // significant for undefined symbols, so whether copying it or not is up
         // to us if that's the case. We'll leave it as zero because by not
@@ -3241,7 +3242,7 @@ void DebugNamesSection<ELFT>::getNameRelocs(
     Relocs<RelTy> rels) {
   for (const RelTy &rel : rels) {
     Symbol &sym = file.getRelocTargetSym(rel);
-    relocs[rel.r_offset] = sym.getVA(getAddend<ELFT>(rel));
+    relocs[rel.r_offset] = sym.getVA(ctx, getAddend<ELFT>(rel));
   }
 }
 
@@ -4356,11 +4357,11 @@ void PPC64LongBranchTargetSection::writeTo(uint8_t *buf) {
   for (auto entry : entries) {
     const Symbol *sym = entry.first;
     int64_t addend = entry.second;
-    assert(sym->getVA());
+    assert(sym->getVA(ctx));
     // Need calls to branch to the local entry-point since a long-branch
     // must be a local-call.
     write64(ctx, buf,
-            sym->getVA(addend) +
+            sym->getVA(ctx, addend) +
                 getPPC64GlobalEntryToLocalEntryOffset(sym->stOther));
     buf += 8;
   }
@@ -4616,7 +4617,7 @@ createMemtagGlobalDescriptors(Ctx &ctx,
   for (const Symbol *sym : symbols) {
     if (!includeInSymtab(ctx, *sym))
       continue;
-    const uint64_t addr = sym->getVA();
+    const uint64_t addr = sym->getVA(ctx);
     const uint64_t size = sym->getSize();
 
     if (addr <= kMemtagGranuleSize && buf != nullptr)
@@ -4653,8 +4654,8 @@ createMemtagGlobalDescriptors(Ctx &ctx,
 bool MemtagGlobalDescriptors::updateAllocSize(Ctx &ctx) {
   size_t oldSize = getSize();
   std::stable_sort(symbols.begin(), symbols.end(),
-                   [](const Symbol *s1, const Symbol *s2) {
-                     return s1->getVA() < s2->getVA();
+                   [&ctx = ctx](const Symbol *s1, const Symbol *s2) {
+                     return s1->getVA(ctx) < s2->getVA(ctx);
                    });
   return oldSize != getSize();
 }

diff --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp
index 971b2724b3e26f..94c0b2409c6c7c 100644
--- a/lld/ELF/Thunks.cpp
+++ b/lld/ELF/Thunks.cpp
@@ -464,7 +464,7 @@ class PPC64R2SaveStub final : public Thunk {
   // This is similar to the handling for ARMThunk.
   bool mayUseShortThunk = true;
   int64_t computeOffset() const {
-    return destination.getVA() - (getThunkTargetSym()->getVA() + 4);
+    return destination.getVA(ctx) - (getThunkTargetSym()->getVA(ctx) + 4);
   }
 };
 
@@ -550,7 +550,7 @@ void Thunk::setOffset(uint64_t newOffset) {
 
 // AArch64 Thunk base class.
 static uint64_t getAArch64ThunkDestVA(Ctx &ctx, const Symbol &s, int64_t a) {
-  uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA(a);
+  uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA(ctx, a);
   return v;
 }
 
@@ -558,7 +558,7 @@ bool AArch64Thunk::getMayUseShortThunk() {
   if (!mayUseShortThunk)
     return false;
   uint64_t s = getAArch64ThunkDestVA(ctx, destination, addend);
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   mayUseShortThunk = llvm::isInt<28>(s - p);
   return mayUseShortThunk;
 }
@@ -569,7 +569,7 @@ void AArch64Thunk::writeTo(uint8_t *buf) {
     return;
   }
   uint64_t s = getAArch64ThunkDestVA(ctx, destination, addend);
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   write32(ctx, buf, 0x14000000); // b S
   ctx.target->relocateNoSym(buf, R_AARCH64_CALL26, s - p);
 }
@@ -592,7 +592,7 @@ void AArch64ABSLongThunk::writeLong(uint8_t *buf) {
   // AArch64BTILandingPadThunk that defines landingPad.
   assert(!mayNeedLandingPad || landingPad != nullptr);
   uint64_t s = mayNeedLandingPad
-                   ? landingPad->getVA(0)
+                   ? landingPad->getVA(ctx, 0)
                    : getAArch64ThunkDestVA(ctx, destination, addend);
   memcpy(buf, data, sizeof(data));
   ctx.target->relocateNoSym(buf + 8, R_AARCH64_ABS64, s);
@@ -621,9 +621,9 @@ void AArch64ADRPThunk::writeLong(uint8_t *buf) {
   // AArch64BTILandingPadThunk that defines landingPad.
   assert(!mayNeedLandingPad || landingPad != nullptr);
   uint64_t s = mayNeedLandingPad
-                   ? landingPad->getVA(0)
+                   ? landingPad->getVA(ctx, 0)
                    : getAArch64ThunkDestVA(ctx, destination, addend);
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   memcpy(buf, data, sizeof(data));
   ctx.target->relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21,
                             getAArch64Page(s) - getAArch64Page(p));
@@ -656,8 +656,8 @@ bool AArch64BTILandingPadThunk::getMayUseShortThunk() {
     return false;
   // If the target is the following instruction then we can fall
   // through without the indirect branch.
-  uint64_t s = destination.getVA(addend);
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t s = destination.getVA(ctx, addend);
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   // This function is called before addresses are stable.  We need to
   // work out the range from the thunk to the next section but the
   // address of the start of the next section depends on the size of
@@ -670,8 +670,8 @@ bool AArch64BTILandingPadThunk::getMayUseShortThunk() {
 }
 
 void AArch64BTILandingPadThunk::writeLong(uint8_t *buf) {
-  uint64_t s = destination.getVA(addend);
-  uint64_t p = getThunkTargetSym()->getVA() + 4;
+  uint64_t s = destination.getVA(ctx, addend);
+  uint64_t p = getThunkTargetSym()->getVA(ctx) + 4;
   write32(ctx, buf, 0xd503245f);     // BTI c
   write32(ctx, buf + 4, 0x14000000); // B S
   ctx.target->relocateNoSym(buf + 4, R_AARCH64_CALL26, s - p);
@@ -679,7 +679,7 @@ void AArch64BTILandingPadThunk::writeLong(uint8_t *buf) {
 
 // ARM Target Thunks
 static uint64_t getARMThunkDestVA(Ctx &ctx, const Symbol &s) {
-  uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA();
+  uint64_t v = s.isInPlt(ctx) ? s.getPltVA(ctx) : s.getVA(ctx);
   return SignExtend64<32>(v);
 }
 
@@ -693,7 +693,7 @@ bool ARMThunk::getMayUseShortThunk() {
     mayUseShortThunk = false;
     return false;
   }
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   int64_t offset = s - p - 8;
   mayUseShortThunk = llvm::isInt<26>(offset);
   return mayUseShortThunk;
@@ -706,7 +706,7 @@ void ARMThunk::writeTo(uint8_t *buf) {
   }
 
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   int64_t offset = s - p - 8;
   write32(ctx, buf, 0xea000000); // b S
   ctx.target->relocateNoSym(buf, R_ARM_JUMP24, offset);
@@ -736,7 +736,7 @@ bool ThumbThunk::getMayUseShortThunk() {
     mayUseShortThunk = false;
     return false;
   }
-  uint64_t p = getThunkTargetSym()->getVA() & ~1;
+  uint64_t p = getThunkTargetSym()->getVA(ctx) & ~1;
   int64_t offset = s - p - 4;
   mayUseShortThunk = llvm::isInt<25>(offset);
   return mayUseShortThunk;
@@ -749,7 +749,7 @@ void ThumbThunk::writeTo(uint8_t *buf) {
   }
 
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   int64_t offset = s - p - 4;
   write16(ctx, buf + 0, 0xf000); // b.w S
   write16(ctx, buf + 2, 0xb000);
@@ -806,7 +806,7 @@ void ARMV7PILongThunk::writeLong(uint8_t *buf) {
   write32(ctx, buf + 8, 0xe08cc00f);  // L1: add  ip, ip, pc
   write32(ctx, buf + 12, 0xe12fff1c); //     bx   ip
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   int64_t offset = s - p - 16;
   ctx.target->relocateNoSym(buf, R_ARM_MOVW_PREL_NC, offset);
   ctx.target->relocateNoSym(buf + 4, R_ARM_MOVT_PREL, offset);
@@ -826,7 +826,7 @@ void ThumbV7PILongThunk::writeLong(uint8_t *buf) {
   write16(ctx, buf + 8, 0x44fc);  // L1: add  ip, pc
   write16(ctx, buf + 10, 0x4760); //     bx   ip
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
+  uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1;
   int64_t offset = s - p - 12;
   ctx.target->relocateNoSym(buf, R_ARM_THM_MOVW_PREL_NC, offset);
   ctx.target->relocateNoSym(buf + 4, R_ARM_THM_MOVT_PREL, offset);
@@ -904,7 +904,7 @@ void ThumbV6MPILongThunk::writeLong(uint8_t *buf) {
           0x46c0); //     nop              ; pad to 4-byte boundary
   write32(ctx, buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 4)
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
+  uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1;
   ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12);
 }
 
@@ -992,7 +992,7 @@ void ARMV4PILongBXThunk::writeLong(uint8_t *buf) {
   write32(ctx, buf + 8, 0xe12fff1c);  //     bx ip
   write32(ctx, buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 8)
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
+  uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1;
   ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12);
 }
 
@@ -1009,7 +1009,7 @@ void ARMV4PILongThunk::writeLong(uint8_t *buf) {
   write32(ctx, buf + 4, 0xe08ff00c); // L1: add pc, pc, r12
   write32(ctx, buf + 8, 0x00000000); // L2: .word S - (P + (L1 - P) + 8)
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
+  uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1;
   ctx.target->relocateNoSym(buf + 8, R_ARM_REL32, s - p - 12);
 }
 
@@ -1029,7 +1029,7 @@ void ThumbV4PILongBXThunk::writeLong(uint8_t *buf) {
   write32(ctx, buf + 8, 0xe08cf00f);  // L1: add pc, r12, pc
   write32(ctx, buf + 12, 0x00000000); // L2: .word S - (P + (L1 - P) + 8)
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
+  uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1;
   ctx.target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 16);
 }
 
@@ -1051,7 +1051,7 @@ void ThumbV4PILongThunk::writeLong(uint8_t *buf) {
   write32(ctx, buf + 12, 0xe12fff1c); //     bx ip
   write32(ctx, buf + 16, 0x00000000); // L2: .word S - (P + (L1 - P) + 8)
   uint64_t s = getARMThunkDestVA(ctx, destination);
-  uint64_t p = getThunkTargetSym()->getVA() & ~0x1;
+  uint64_t p = getThunkTargetSym()->getVA(ctx) & ~0x1;
   ctx.target->relocateNoSym(buf + 16, R_ARM_REL32, s - p - 16);
 }
 
@@ -1067,7 +1067,7 @@ void ThumbV4PILongThunk::addSymbols(ThunkSection &isec) {
 // Use the long jump which covers a range up to 8MiB.
 void AVRThunk::writeTo(uint8_t *buf) {
   write32(ctx, buf, 0x940c); // jmp func
-  ctx.target->relocateNoSym(buf, R_AVR_CALL, destination.getVA());
+  ctx.target->relocateNoSym(buf, R_AVR_CALL, destination.getVA(ctx));
 }
 
 void AVRThunk::addSymbols(ThunkSection &isec) {
@@ -1077,7 +1077,7 @@ void AVRThunk::addSymbols(ThunkSection &isec) {
 
 // Write MIPS LA25 thunk code to call PIC function from the non-PIC one.
 void MipsThunk::writeTo(uint8_t *buf) {
-  uint64_t s = destination.getVA();
+  uint64_t s = destination.getVA(ctx);
   write32(ctx, buf, 0x3c190000);                // lui   $25, %hi(func)
   write32(ctx, buf + 4, 0x08000000 | (s >> 2)); // j     func
   write32(ctx, buf + 8, 0x27390000);            // addiu $25, $25, %lo(func)
@@ -1099,7 +1099,7 @@ InputSection *MipsThunk::getTargetInputSection() const {
 // Write microMIPS R2-R5 LA25 thunk code
 // to call PIC function from the non-PIC one.
 void MicroMipsThunk::writeTo(uint8_t *buf) {
-  uint64_t s = destination.getVA();
+  uint64_t s = destination.getVA(ctx);
   write16(ctx, buf, 0x41b9);      // lui   $25, %hi(func)
   write16(ctx, buf + 4, 0xd400);  // j     func
   write16(ctx, buf + 8, 0x3339);  // addiu $25, $25, %lo(func)
@@ -1124,8 +1124,8 @@ InputSection *MicroMipsThunk::getTargetInputSection() const {
 // Write microMIPS R6 LA25 thunk code
 // to call PIC function from the non-PIC one.
 void MicroMipsR6Thunk::writeTo(uint8_t *buf) {
-  uint64_t s = destination.getVA();
-  uint64_t p = getThunkTargetSym()->getVA();
+  uint64_t s = destination.getVA(ctx);
+  uint64_t p = getThunkTargetSym()->getVA(ctx);
   write16(ctx, buf, 0x1320);     // lui   $25, %hi(func)
   write16(ctx, buf + 4, 0x3339); // addiu $25, $25, %lo(func)
   write16(ctx, buf + 8, 0x9400); // bc    func
@@ -1213,9 +1213,9 @@ void PPC32LongThunk::addSymbols(ThunkSection &isec) {
 void PPC32LongThunk::writeTo(uint8_t *buf) {
   auto ha = [](uint32_t v) -> uint16_t { return (v + 0x8000) >> 16; };
   auto lo = [](uint32_t v) -> uint16_t { return v; };
-  uint32_t d = destination.getVA(addend);
+  uint32_t d = destination.getVA(ctx, addend);
   if (ctx.arg.isPic) {
-    uint32_t off = d - (getThunkTargetSym()->getVA() + 8);
+    uint32_t off = d - (getThunkTargetSym()->getVA(ctx) + 8);
     write32(ctx, buf + 0, 0x7c0802a6);            // mflr r12,0
     write32(ctx, buf + 4, 0x429f0005);            // bcl r20,r31,.+4
     write32(ctx, buf + 8, 0x7d8802a6);            // mtctr r12
@@ -1269,7 +1269,7 @@ void PPC64R2SaveStub::writeTo(uint8_t *buf) {
     write32(ctx, buf + 4, 0x48000000 | (offset & 0x03fffffc)); // b    <offset>
   } else if (isInt<34>(offset)) {
     int nextInstOffset;
-    uint64_t tocOffset = destination.getVA() - getPPC64TocBase(ctx);
+    uint64_t tocOffset = destination.getVA(ctx) - getPPC64TocBase(ctx);
     if (tocOffset >> 16 > 0) {
       const uint64_t addi = ADDI_R12_TO_R12_NO_DISP | (tocOffset & 0xffff);
       const uint64_t addis =
@@ -1306,8 +1306,8 @@ bool PPC64R2SaveStub::isCompatibleWith(const InputSection &isec,
 
 void PPC64R12SetupStub::writeTo(uint8_t *buf) {
   int64_t offset =
-      (gotPlt ? destination.getGotPltVA(ctx) : destination.getVA()) -
-      getThunkTargetSym()->getVA();
+      (gotPlt ? destination.getGotPltVA(ctx) : destination.getVA(ctx)) -
+      getThunkTargetSym()->getVA(ctx);
   if (!isInt<34>(offset))
     reportRangeError(ctx, buf, offset, 34, destination,
                      "R12 setup stub offset");
@@ -1393,7 +1393,7 @@ static Thunk *addThunkAArch64(Ctx &ctx, RelType type, Symbol &s, int64_t a) {
 // TODO: use B for short Thumb->Arm thunks instead of LDR (this doesn't work for
 //       Arm->Thumb, as in Arm state no BX PC trick; it doesn't switch state).
 static Thunk *addThunkArmv4(Ctx &ctx, RelType reloc, Symbol &s, int64_t a) {
-  bool thumb_target = s.getVA(a) & 1;
+  bool thumb_target = s.getVA(ctx, a) & 1;
 
   switch (reloc) {
   case R_ARM_PC24:

diff  --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index c237a5f3793a12..975954991caebe 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -1504,9 +1504,9 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
       // .rela.dyn. See also AArch64::relocate.
       if (part.relrAuthDyn) {
         auto it = llvm::remove_if(
-            part.relrAuthDyn->relocs, [&part](const RelativeReloc &elem) {
+            part.relrAuthDyn->relocs, [this, &part](const RelativeReloc &elem) {
               const Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx];
-              if (isInt<32>(reloc.sym->getVA(reloc.addend)))
+              if (isInt<32>(reloc.sym->getVA(ctx, reloc.addend)))
                 return false;
               part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, elem.inputSec,
                                       reloc.offset,
@@ -2713,7 +2713,7 @@ template <class ELFT> void Writer<ELFT>::checkSections() {
 static uint64_t getEntryAddr(Ctx &ctx) {
   // Case 1, 2 or 3
   if (Symbol *b = ctx.symtab->find(ctx.arg.entry))
-    return b->getVA();
+    return b->getVA(ctx);
 
   // Case 4
   uint64_t addr;


        


More information about the llvm-commits mailing list