[lld] 1dd9a56 - [ELF] Replace config-> with ctx.arg. in Arch/

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 21 12:03:23 PDT 2024


Author: Fangrui Song
Date: 2024-09-21T12:03:18-07:00
New Revision: 1dd9a565eae55edbc74822c29aa1f8ac2bc3e88b

URL: https://github.com/llvm/llvm-project/commit/1dd9a565eae55edbc74822c29aa1f8ac2bc3e88b
DIFF: https://github.com/llvm/llvm-project/commit/1dd9a565eae55edbc74822c29aa1f8ac2bc3e88b.diff

LOG: [ELF] Replace config-> with ctx.arg. in Arch/

Added: 
    

Modified: 
    lld/ELF/Arch/AArch64.cpp
    lld/ELF/Arch/ARM.cpp
    lld/ELF/Arch/LoongArch.cpp
    lld/ELF/Arch/Mips.cpp
    lld/ELF/Arch/MipsArchTree.cpp
    lld/ELF/Arch/PPC.cpp
    lld/ELF/Arch/PPC64.cpp
    lld/ELF/Arch/RISCV.cpp
    lld/ELF/Arch/SystemZ.cpp
    lld/ELF/Arch/X86.cpp
    lld/ELF/Arch/X86_64.cpp

Removed: 
    


################################################################################
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index 36880bf67e9f23..fb70e66b6d7561 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -326,7 +326,7 @@ void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
 }
 
 void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
-  if (config->writeAddends)
+  if (ctx.arg.writeAddends)
     write64(buf, s.getVA());
 }
 
@@ -719,7 +719,7 @@ void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
 }
 
 AArch64Relaxer::AArch64Relaxer(ArrayRef<Relocation> relocs) {
-  if (!config->relax)
+  if (!ctx.arg.relax)
     return;
   // Check if R_AARCH64_ADR_GOT_PAGE and R_AARCH64_LD64_GOT_LO12_NC
   // always appear in pairs.
@@ -749,7 +749,7 @@ bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel,
   // to
   // NOP
   // ADR xn, sym
-  if (!config->relax || adrpRel.type != R_AARCH64_ADR_PREL_PG_HI21 ||
+  if (!ctx.arg.relax || adrpRel.type != R_AARCH64_ADR_PREL_PG_HI21 ||
       addRel.type != R_AARCH64_ADD_ABS_LO12_NC)
     return false;
   // Check if the relocations apply to consecutive instructions.
@@ -836,7 +836,7 @@ bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
   // GOT references to absolute symbols can't be relaxed to use ADRP/ADD in
   // position-independent code because these instructions produce a relative
   // address.
-  if (config->isPic && !cast<Defined>(sym).section)
+  if (ctx.arg.isPic && !cast<Defined>(sym).section)
     return false;
   // Check if the address difference is within 4GB range.
   int64_t val =
@@ -972,7 +972,7 @@ class AArch64BtiPac final : public AArch64 {
 } // namespace
 
 AArch64BtiPac::AArch64BtiPac() {
-  btiHeader = (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI);
+  btiHeader = (ctx.arg.andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI);
   // A BTI (Branch Target Indicator) Plt Entry is only required if the
   // address of the PLT entry can be taken by the program, which permits an
   // indirect jump to the PLT entry. This can happen when the address
@@ -982,7 +982,7 @@ AArch64BtiPac::AArch64BtiPac() {
   // relocations.
   // The PAC PLT entries require dynamic loader support and this isn't known
   // from properties in the objects, so we use the command line flag.
-  pacEntry = config->zPacPlt;
+  pacEntry = ctx.arg.zPacPlt;
 
   if (btiHeader || pacEntry) {
     pltEntrySize = 24;
@@ -1074,8 +1074,8 @@ void AArch64BtiPac::writePlt(uint8_t *buf, const Symbol &sym,
 }
 
 static TargetInfo *getTargetInfo() {
-  if ((config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ||
-      config->zPacPlt) {
+  if ((ctx.arg.andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ||
+      ctx.arg.zPacPlt) {
     static AArch64BtiPac t;
     return &t;
   }
@@ -1175,7 +1175,7 @@ void lld::elf::createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files) {
   // `addTaggedSymbolReferences` has already checked that we have RELA
   // relocations, the only other way to get written addends is with
   // --apply-dynamic-relocs.
-  if (!taggedSymbolReferenceCount.empty() && config->writeAddends)
+  if (!taggedSymbolReferenceCount.empty() && ctx.arg.writeAddends)
     error("--apply-dynamic-relocs cannot be used with MTE globals");
 
   // Now, `taggedSymbolReferenceCount` should only contain symbols that are

diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index 12b576979512b6..3484e66d2b1d4d 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -81,13 +81,13 @@ uint32_t ARM::calcEFlags() const {
   // with BE-8 code.
   uint32_t armBE8 = 0;
 
-  if (config->armVFPArgs == ARMVFPArgKind::Base ||
-      config->armVFPArgs == ARMVFPArgKind::Default)
+  if (ctx.arg.armVFPArgs == ARMVFPArgKind::Base ||
+      ctx.arg.armVFPArgs == ARMVFPArgKind::Default)
     abiFloatType = EF_ARM_ABI_FLOAT_SOFT;
-  else if (config->armVFPArgs == ARMVFPArgKind::VFP)
+  else if (ctx.arg.armVFPArgs == ARMVFPArgKind::VFP)
     abiFloatType = EF_ARM_ABI_FLOAT_HARD;
 
-  if (!config->isLE && config->armBe8)
+  if (!ctx.arg.isLE && ctx.arg.armBe8)
     armBE8 = EF_ARM_BE8;
 
   // We don't currently use any features incompatible with EF_ARM_EABI_VER5,
@@ -134,11 +134,11 @@ RelExpr ARM::getRelExpr(RelType type, const Symbol &s,
   case R_ARM_SBREL32:
     return R_ARM_SBREL;
   case R_ARM_TARGET1:
-    return config->target1Rel ? R_PC : R_ABS;
+    return ctx.arg.target1Rel ? R_PC : R_ABS;
   case R_ARM_TARGET2:
-    if (config->target2 == Target2Policy::Rel)
+    if (ctx.arg.target2 == Target2Policy::Rel)
       return R_PC;
-    if (config->target2 == Target2Policy::Abs)
+    if (ctx.arg.target2 == Target2Policy::Abs)
       return R_ABS;
     return R_GOT_PC;
   case R_ARM_TLS_GD32:
@@ -198,7 +198,7 @@ RelExpr ARM::getRelExpr(RelType type, const Symbol &s,
 }
 
 RelType ARM::getDynRel(RelType type) const {
-  if ((type == R_ARM_ABS32) || (type == R_ARM_TARGET1 && !config->target1Rel))
+  if ((type == R_ARM_ABS32) || (type == R_ARM_TARGET1 && !ctx.arg.target1Rel))
     return R_ARM_ABS32;
   return R_ARM_NONE;
 }
@@ -231,7 +231,7 @@ static void writePltHeaderLong(uint8_t *buf) {
 // True if we should use Thumb PLTs, which currently require Thumb2, and are
 // only used if the target does not have the ARM ISA.
 static bool useThumbPLTs() {
-  return config->armHasThumb2ISA && !config->armHasArmISA;
+  return ctx.arg.armHasThumb2ISA && !ctx.arg.armHasArmISA;
 }
 
 // The default PLT header requires the .got.plt to be within 128 Mb of the
@@ -407,7 +407,7 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
   case R_ARM_CALL: {
     uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
     return !inBranchRange(type, branchAddr, dst + a) ||
-        (!config->armHasBlx && (s.getVA() & 1));
+        (!ctx.arg.armHasBlx && (s.getVA() & 1));
   }
   case R_ARM_THM_JUMP19:
   case R_ARM_THM_JUMP24:
@@ -420,7 +420,7 @@ bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
   case R_ARM_THM_CALL: {
     uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
     return !inBranchRange(type, branchAddr, dst + a) ||
-        (!config->armHasBlx && (s.getVA() & 1) == 0);;
+        (!ctx.arg.armHasBlx && (s.getVA() & 1) == 0);;
   }
   }
   return false;
@@ -456,7 +456,7 @@ uint32_t ARM::getThunkSectionSpacing() const {
   // range. On earlier Architectures such as ARMv4, ARMv5 and ARMv6 (except
   // ARMv6T2) the range is +/- 4MiB.
 
-  return (config->armJ1J2BranchEncoding) ? 0x1000000 - 0x30000
+  return (ctx.arg.armJ1J2BranchEncoding) ? 0x1000000 - 0x30000
                                          : 0x400000 - 0x7500;
 }
 
@@ -481,7 +481,7 @@ bool ARM::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
     return llvm::isInt<21>(offset);
   case R_ARM_THM_JUMP24:
   case R_ARM_THM_CALL:
-    return config->armJ1J2BranchEncoding ? llvm::isInt<25>(offset)
+    return ctx.arg.armJ1J2BranchEncoding ? llvm::isInt<25>(offset)
                                          : llvm::isInt<23>(offset);
   default:
     return true;
@@ -697,7 +697,7 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     } else {
       write16(loc + 2, (read16(loc + 2) & ~0x1000) | 1 << 12);
     }
-    if (!config->armJ1J2BranchEncoding) {
+    if (!ctx.arg.armJ1J2BranchEncoding) {
       // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
       // different encoding rules and range due to J1 and J2 always being 1.
       checkInt(loc, val, 23, rel);
@@ -909,7 +909,7 @@ int64_t ARM::getImplicitAddend(const uint8_t *buf, RelType type) const {
                             ((lo & 0x07ff) << 1));  // imm11:0
   }
   case R_ARM_THM_CALL:
-    if (!config->armJ1J2BranchEncoding) {
+    if (!ctx.arg.armJ1J2BranchEncoding) {
       // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
       // different encoding rules and range due to J1 and J2 always being 1.
       uint16_t hi = read16(buf);
@@ -1261,7 +1261,7 @@ static std::string checkCmseSymAttributes(Symbol *acleSeSym, Symbol *sym) {
 // Both these symbols are Thumb function symbols with external linkage.
 // <sym> may be redefined in .gnu.sgstubs.
 void elf::processArmCmseSymbols() {
-  if (!config->cmseImplib)
+  if (!ctx.arg.cmseImplib)
     return;
   // Only symbols with external linkage end up in symtab, so no need to do
   // linkage checks. Only check symbol type.
@@ -1270,9 +1270,9 @@ void elf::processArmCmseSymbols() {
       continue;
     // If input object build attributes do not support CMSE, error and disable
     // further scanning for <sym>, __acle_se_<sym> pairs.
-    if (!config->armCMSESupport) {
+    if (!ctx.arg.armCMSESupport) {
       error("CMSE is only supported by ARMv8-M architecture or later");
-      config->cmseImplib = false;
+      ctx.arg.cmseImplib = false;
       break;
     }
 
@@ -1348,7 +1348,7 @@ ArmCmseSGSection::ArmCmseSGSection()
            "' from CMSE import library is not present in secure application");
   }
 
-  if (!symtab.cmseImportLib.empty() && config->cmseOutputLib.empty()) {
+  if (!symtab.cmseImportLib.empty() && ctx.arg.cmseOutputLib.empty()) {
     for (auto &[_, entryFunc] : symtab.cmseSymMap) {
       Symbol *sym = entryFunc.sym;
       if (!symtab.inCMSEOutImpLib.count(sym->getName()))
@@ -1476,17 +1476,17 @@ template <typename ELFT> void elf::writeARMCmseImportLib() {
     off = osec->offset + osec->size;
   }
 
-  const uint64_t sectionHeaderOff = alignToPowerOf2(off, config->wordsize);
+  const uint64_t sectionHeaderOff = alignToPowerOf2(off, ctx.arg.wordsize);
   const auto shnum = osIsPairs.size() + 1;
   const uint64_t fileSize =
       sectionHeaderOff + shnum * sizeof(typename ELFT::Shdr);
   const unsigned flags =
-      config->mmapOutputFile ? 0 : (unsigned)FileOutputBuffer::F_no_mmap;
-  unlinkAsync(config->cmseOutputLib);
+      ctx.arg.mmapOutputFile ? 0 : (unsigned)FileOutputBuffer::F_no_mmap;
+  unlinkAsync(ctx.arg.cmseOutputLib);
   Expected<std::unique_ptr<FileOutputBuffer>> bufferOrErr =
-      FileOutputBuffer::create(config->cmseOutputLib, fileSize, flags);
+      FileOutputBuffer::create(ctx.arg.cmseOutputLib, fileSize, flags);
   if (!bufferOrErr) {
-    error("failed to open " + config->cmseOutputLib + ": " +
+    error("failed to open " + ctx.arg.cmseOutputLib + ": " +
           llvm::toString(bufferOrErr.takeError()));
     return;
   }
@@ -1500,13 +1500,13 @@ template <typename ELFT> void elf::writeARMCmseImportLib() {
   eHdr->e_entry = 0;
   eHdr->e_shoff = sectionHeaderOff;
   eHdr->e_ident[EI_CLASS] = ELFCLASS32;
-  eHdr->e_ident[EI_DATA] = config->isLE ? ELFDATA2LSB : ELFDATA2MSB;
+  eHdr->e_ident[EI_DATA] = ctx.arg.isLE ? ELFDATA2LSB : ELFDATA2MSB;
   eHdr->e_ident[EI_VERSION] = EV_CURRENT;
-  eHdr->e_ident[EI_OSABI] = config->osabi;
+  eHdr->e_ident[EI_OSABI] = ctx.arg.osabi;
   eHdr->e_ident[EI_ABIVERSION] = 0;
   eHdr->e_machine = EM_ARM;
   eHdr->e_version = EV_CURRENT;
-  eHdr->e_flags = config->eflags;
+  eHdr->e_flags = ctx.arg.eflags;
   eHdr->e_ehsize = sizeof(typename ELFT::Ehdr);
   eHdr->e_phnum = 0;
   eHdr->e_shentsize = sizeof(typename ELFT::Shdr);

diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index 0044afb92cd412..662dcb2ef3c62f 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -188,7 +188,7 @@ LoongArch::LoongArch() {
   relativeRel = R_LARCH_RELATIVE;
   iRelativeRel = R_LARCH_IRELATIVE;
 
-  if (config->is64) {
+  if (ctx.arg.is64) {
     symbolicRel = R_LARCH_64;
     tlsModuleIndexRel = R_LARCH_TLS_DTPMOD64;
     tlsOffsetRel = R_LARCH_TLS_DTPREL64;
@@ -213,7 +213,7 @@ LoongArch::LoongArch() {
 }
 
 static uint32_t getEFlags(const InputFile *f) {
-  if (config->is64)
+  if (ctx.arg.is64)
     return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
   return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
 }
@@ -294,7 +294,7 @@ int64_t LoongArch::getImplicitAddend(const uint8_t *buf, RelType type) const {
     return read64le(buf);
   case R_LARCH_RELATIVE:
   case R_LARCH_IRELATIVE:
-    return config->is64 ? read64le(buf) : read32le(buf);
+    return ctx.arg.is64 ? read64le(buf) : read32le(buf);
   case R_LARCH_NONE:
   case R_LARCH_JUMP_SLOT:
     // These relocations are defined as not having an implicit addend.
@@ -307,15 +307,15 @@ int64_t LoongArch::getImplicitAddend(const uint8_t *buf, RelType type) const {
 }
 
 void LoongArch::writeGotPlt(uint8_t *buf, const Symbol &s) const {
-  if (config->is64)
+  if (ctx.arg.is64)
     write64le(buf, ctx.in.plt->getVA());
   else
     write32le(buf, ctx.in.plt->getVA());
 }
 
 void LoongArch::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
-  if (config->writeAddends) {
-    if (config->is64)
+  if (ctx.arg.writeAddends) {
+    if (ctx.arg.is64)
       write64le(buf, s.getVA());
     else
       write32le(buf, s.getVA());
@@ -342,18 +342,18 @@ void LoongArch::writePltHeader(uint8_t *buf) const {
   //   ld.[wd]   $t0, $t0, Wordsize               ; t0 = link_map
   //   jr        $t3
   uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA();
-  uint32_t sub = config->is64 ? SUB_D : SUB_W;
-  uint32_t ld = config->is64 ? LD_D : LD_W;
-  uint32_t addi = config->is64 ? ADDI_D : ADDI_W;
-  uint32_t srli = config->is64 ? SRLI_D : SRLI_W;
+  uint32_t sub = ctx.arg.is64 ? SUB_D : SUB_W;
+  uint32_t ld = ctx.arg.is64 ? LD_D : LD_W;
+  uint32_t addi = ctx.arg.is64 ? ADDI_D : ADDI_W;
+  uint32_t srli = ctx.arg.is64 ? SRLI_D : SRLI_W;
   write32le(buf + 0, insn(PCADDU12I, R_T2, hi20(offset), 0));
   write32le(buf + 4, insn(sub, R_T1, R_T1, R_T3));
   write32le(buf + 8, insn(ld, R_T3, R_T2, lo12(offset)));
   write32le(buf + 12,
             insn(addi, R_T1, R_T1, lo12(-ctx.target->pltHeaderSize - 12)));
   write32le(buf + 16, insn(addi, R_T0, R_T2, lo12(offset)));
-  write32le(buf + 20, insn(srli, R_T1, R_T1, config->is64 ? 1 : 2));
-  write32le(buf + 24, insn(ld, R_T0, R_T0, config->wordsize));
+  write32le(buf + 20, insn(srli, R_T1, R_T1, ctx.arg.is64 ? 1 : 2));
+  write32le(buf + 24, insn(ld, R_T0, R_T0, ctx.arg.wordsize));
   write32le(buf + 28, insn(JIRL, R_ZERO, R_T3, 0));
 }
 
@@ -369,7 +369,7 @@ void LoongArch::writePlt(uint8_t *buf, const Symbol &sym,
   uint32_t offset = sym.getGotPltVA() - pltEntryAddr;
   write32le(buf + 0, insn(PCADDU12I, R_T3, hi20(offset), 0));
   write32le(buf + 4,
-            insn(config->is64 ? LD_D : LD_W, R_T3, R_T3, lo12(offset)));
+            insn(ctx.arg.is64 ? LD_D : LD_W, R_T3, R_T3, lo12(offset)));
   write32le(buf + 8, insn(JIRL, R_T1, R_T3, 0));
   write32le(buf + 12, insn(ANDI, R_ZERO, R_ZERO, 0));
 }
@@ -496,7 +496,7 @@ RelExpr LoongArch::getRelExpr(const RelType type, const Symbol &s,
     return R_TLSGD_GOT;
   case R_LARCH_TLS_LE_ADD_R:
   case R_LARCH_RELAX:
-    return config->relax ? R_RELAX_HINT : R_NONE;
+    return ctx.arg.relax ? R_RELAX_HINT : R_NONE;
   case R_LARCH_ALIGN:
     return R_RELAX_HINT;
   case R_LARCH_TLS_DESC_PC_HI20:
@@ -821,7 +821,7 @@ static bool relax(InputSection &sec) {
 // change in section sizes can have cascading effect and require another
 // relaxation pass.
 bool LoongArch::relaxOnce(int pass) const {
-  if (config->relocatable)
+  if (ctx.arg.relocatable)
     return false;
 
   if (pass == 0)

diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp
index f5920c75ca76d5..8822be5ea8d5b5 100644
--- a/lld/ELF/Arch/Mips.cpp
+++ b/lld/ELF/Arch/Mips.cpp
@@ -77,7 +77,7 @@ template <class ELFT>
 RelExpr MIPS<ELFT>::getRelExpr(RelType type, const Symbol &s,
                                const uint8_t *loc) const {
   // See comment in the calculateMipsRelChain.
-  if (ELFT::Is64Bits || config->mipsN32Abi)
+  if (ELFT::Is64Bits || ctx.arg.mipsN32Abi)
     type &= 0xff;
 
   switch (type) {
@@ -283,7 +283,7 @@ template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const {
     return;
   }
 
-  if (config->mipsN32Abi) {
+  if (ctx.arg.mipsN32Abi) {
     write32(buf, 0x3c0e0000);      // lui   $14, %hi(&GOTPLT[0])
     write32(buf + 4, 0x8dd90000);  // lw    $25, %lo(&GOTPLT[0])($14)
     write32(buf + 8, 0x25ce0000);  // addiu $14, $14, %lo(&GOTPLT[0])
@@ -306,7 +306,7 @@ template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const {
     write32(buf + 20, 0x0018c082); // srl   $24, $24, 2
   }
 
-  uint32_t jalrInst = config->zHazardplt ? 0x0320fc09 : 0x0320f809;
+  uint32_t jalrInst = ctx.arg.zHazardplt ? 0x0320fc09 : 0x0320f809;
   write32(buf + 24, jalrInst); // jalr.hb $25 or jalr $25
   write32(buf + 28, 0x2718fffe); // subu  $24, $24, 2
 
@@ -341,8 +341,8 @@ void MIPS<ELFT>::writePlt(uint8_t *buf, const Symbol &sym,
   }
 
   uint32_t loadInst = ELFT::Is64Bits ? 0xddf90000 : 0x8df90000;
-  uint32_t jrInst = isMipsR6() ? (config->zHazardplt ? 0x03200409 : 0x03200009)
-                               : (config->zHazardplt ? 0x03200408 : 0x03200008);
+  uint32_t jrInst = isMipsR6() ? (ctx.arg.zHazardplt ? 0x03200409 : 0x03200009)
+                               : (ctx.arg.zHazardplt ? 0x03200408 : 0x03200008);
   uint32_t addInst = ELFT::Is64Bits ? 0x65f80000 : 0x25f80000;
 
   write32(buf, 0x3c0f0000);     // lui   $15, %hi(.got.plt entry)
@@ -465,7 +465,7 @@ int64_t MIPS<ELFT>::getImplicitAddend(const uint8_t *buf, RelType type) const {
   case (R_MIPS_64 << 8) | R_MIPS_REL32:
     return read64(buf);
   case R_MIPS_COPY:
-    return config->is64 ? read64(buf) : read32(buf);
+    return ctx.arg.is64 ? read64(buf) : read32(buf);
   case R_MIPS_NONE:
   case R_MIPS_JUMP_SLOT:
   case R_MIPS_JALR:
@@ -570,7 +570,7 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
   const endianness e = ELFT::Endianness;
   RelType type = rel.type;
 
-  if (ELFT::Is64Bits || config->mipsN32Abi)
+  if (ELFT::Is64Bits || ctx.arg.mipsN32Abi)
     std::tie(type, val) = calculateMipsRelChain(loc, type, val);
 
   // Detect cross-mode jump/branch and fix instruction.
@@ -604,7 +604,7 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
     // The R_MIPS_GOT16 relocation's value in "relocatable" linking mode
     // is updated addend (not a GOT index). In that case write high 16 bits
     // to store a correct addend value.
-    if (config->relocatable) {
+    if (ctx.arg.relocatable) {
       writeValue(loc, val + 0x8000, 16, 16);
     } else {
       checkInt(loc, val, 16, rel);
@@ -612,7 +612,7 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
     }
     break;
   case R_MICROMIPS_GOT16:
-    if (config->relocatable) {
+    if (ctx.arg.relocatable) {
       writeShuffleValue<e>(loc, val + 0x8000, 16, 16);
     } else {
       checkInt(loc, val, 16, rel);

diff --git a/lld/ELF/Arch/MipsArchTree.cpp b/lld/ELF/Arch/MipsArchTree.cpp
index 44661731a3e2f8..959bd21a7aa6df 100644
--- a/lld/ELF/Arch/MipsArchTree.cpp
+++ b/lld/ELF/Arch/MipsArchTree.cpp
@@ -70,7 +70,7 @@ static void checkFlags(ArrayRef<FileFlags> files) {
   bool fp = files[0].flags & EF_MIPS_FP64;
 
   for (const FileFlags &f : files) {
-    if (config->is64 && f.flags & EF_MIPS_MICROMIPS)
+    if (ctx.arg.is64 && f.flags & EF_MIPS_MICROMIPS)
       error(toString(f.file) + ": microMIPS 64-bit is not supported");
 
     uint32_t abi2 = f.flags & (EF_MIPS_ABI | EF_MIPS_ABI2);
@@ -301,9 +301,9 @@ template <class ELFT> uint32_t elf::calcMipsEFlags() {
     // If we don't have any input files, we'll have to rely on the information
     // we can derive from emulation information, since this at least gets us
     // ABI.
-    if (config->emulation.empty() || config->is64)
+    if (ctx.arg.emulation.empty() || ctx.arg.is64)
       return 0;
-    return config->mipsN32Abi ? EF_MIPS_ABI2 : EF_MIPS_ABI_O32;
+    return ctx.arg.mipsN32Abi ? EF_MIPS_ABI2 : EF_MIPS_ABI_O32;
   }
   checkFlags(v);
   return getMiscFlags(v) | getPicFlags(v) | getArchFlags(v);
@@ -367,7 +367,7 @@ template <class ELFT> static bool isN32Abi(const InputFile *f) {
 }
 
 bool elf::isMipsN32Abi(const InputFile *f) {
-  switch (config->ekind) {
+  switch (ctx.arg.ekind) {
   case ELF32LEKind:
     return isN32Abi<ELF32LE>(f);
   case ELF32BEKind:
@@ -381,10 +381,10 @@ bool elf::isMipsN32Abi(const InputFile *f) {
   }
 }
 
-bool elf::isMicroMips() { return config->eflags & EF_MIPS_MICROMIPS; }
+bool elf::isMicroMips() { return ctx.arg.eflags & EF_MIPS_MICROMIPS; }
 
 bool elf::isMipsR6() {
-  uint32_t arch = config->eflags & EF_MIPS_ARCH;
+  uint32_t arch = ctx.arg.eflags & EF_MIPS_ARCH;
   return arch == EF_MIPS_ARCH_32R6 || arch == EF_MIPS_ARCH_64R6;
 }
 

diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp
index 53f760e5304dc0..c5f9de5a2f2a5c 100644
--- a/lld/ELF/Arch/PPC.cpp
+++ b/lld/ELF/Arch/PPC.cpp
@@ -65,18 +65,18 @@ static uint16_t lo(uint32_t v) { return v; }
 static uint16_t ha(uint32_t v) { return (v + 0x8000) >> 16; }
 
 static uint32_t readFromHalf16(const uint8_t *loc) {
-  return read32(config->isLE ? loc : loc - 2);
+  return read32(ctx.arg.isLE ? loc : loc - 2);
 }
 
 static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
-  write32(config->isLE ? loc : loc - 2, insn);
+  write32(ctx.arg.isLE ? loc : loc - 2, insn);
 }
 
 void elf::writePPC32GlinkSection(uint8_t *buf, size_t numEntries) {
   // Create canonical PLT entries for non-PIE code. Compilers don't generate
   // non-GOT-non-PLT relocations referencing external functions for -fpie/-fPIE.
   uint32_t glink = ctx.in.plt->getVA(); // VA of .glink
-  if (!config->isPic) {
+  if (!ctx.arg.isPic) {
     for (const Symbol *sym :
          cast<PPC32GlinkSection>(*ctx.in.plt).canonical_plts) {
       writePPC32PltCallStub(buf, sym->getGotPltVA(), nullptr, 0);
@@ -104,7 +104,7 @@ void elf::writePPC32GlinkSection(uint8_t *buf, size_t numEntries) {
   // itself) and calls _dl_runtime_resolve() (in glibc).
   uint32_t got = ctx.in.got->getVA();
   const uint8_t *end = buf + 64;
-  if (config->isPic) {
+  if (ctx.arg.isPic) {
     uint32_t afterBcl = 4 * ctx.in.plt->getNumEntries() + 12;
     uint32_t gotBcl = got + 4 - (glink + afterBcl);
     write32(buf + 0, 0x3d6b0000 | ha(afterBcl));  // addis r11,r11,1f-glink at ha

diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index e7617ca0760908..803cc5402dda3c 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -245,7 +245,7 @@ unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther) {
 }
 
 void elf::writePrefixedInstruction(uint8_t *loc, uint64_t insn) {
-  insn = config->isLE ? insn << 32 | insn >> 32 : insn;
+  insn = ctx.arg.isLE ? insn << 32 | insn >> 32 : insn;
   write64(loc, insn);
 }
 
@@ -379,7 +379,7 @@ getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) {
 // Returns true if the relaxation is performed.
 static bool tryRelaxPPC64TocIndirection(const Relocation &rel,
                                         uint8_t *bufLoc) {
-  assert(config->tocOptimize);
+  assert(ctx.arg.tocOptimize);
   if (rel.addend < 0)
     return false;
 
@@ -392,7 +392,7 @@ static bool tryRelaxPPC64TocIndirection(const Relocation &rel,
   int64_t addend;
   auto *tocISB = cast<InputSectionBase>(defSym->section);
   std::tie(d, addend) =
-      config->isLE ? getRelaTocSymAndAddend<ELF64LE>(tocISB, rel.addend)
+      ctx.arg.isLE ? getRelaTocSymAndAddend<ELF64LE>(tocISB, rel.addend)
                    : getRelaTocSymAndAddend<ELF64BE>(tocISB, rel.addend);
 
   // Only non-preemptable defined symbols can be relaxed.
@@ -566,16 +566,16 @@ static int64_t getTotalDisp(uint64_t prefixedInsn, uint32_t accessInsn) {
 // little-endian it is pointing to the start of the word. These 2 helpers are to
 // simplify reading and writing in that context.
 static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
-  write32(config->isLE ? loc : loc - 2, insn);
+  write32(ctx.arg.isLE ? loc : loc - 2, insn);
 }
 
 static uint32_t readFromHalf16(const uint8_t *loc) {
-  return read32(config->isLE ? loc : loc - 2);
+  return read32(ctx.arg.isLE ? loc : loc - 2);
 }
 
 static uint64_t readPrefixedInstruction(const uint8_t *loc) {
   uint64_t fullInstr = read64(loc);
-  return config->isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
+  return ctx.arg.isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
 }
 
 PPC64::PPC64() {
@@ -762,7 +762,7 @@ void PPC64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
       // Since we are relocating a half16 type relocation and Loc + 4 points to
       // the start of an instruction we need to advance the buffer by an extra
       // 2 bytes on BE.
-      relocateNoSym(loc + 4 + (config->ekind == ELF64BEKind ? 2 : 0),
+      relocateNoSym(loc + 4 + (ctx.arg.ekind == ELF64BEKind ? 2 : 0),
                     R_PPC64_TPREL16_LO, val);
     } else if (locAsInt % 4 == 1) {
       write32(loc - 1, NOP);
@@ -909,7 +909,7 @@ void PPC64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
   // instruction, if we are accessing memory it will use any of the X-form
   // indexed load or store instructions.
 
-  unsigned offset = (config->ekind == ELF64BEKind) ? 2 : 0;
+  unsigned offset = (ctx.arg.ekind == ELF64BEKind) ? 2 : 0;
   switch (rel.type) {
   case R_PPC64_GOT_TPREL16_HA:
     write32(loc - offset, NOP);
@@ -1026,7 +1026,7 @@ RelExpr PPC64::getRelExpr(RelType type, const Symbol &s,
     return R_GOT_PC;
   case R_PPC64_TOC16_HA:
   case R_PPC64_TOC16_LO_DS:
-    return config->tocOptimize ? R_PPC64_RELAX_TOC : R_GOTREL;
+    return ctx.arg.tocOptimize ? R_PPC64_RELAX_TOC : R_GOTREL;
   case R_PPC64_TOC:
     return R_PPC64_TOCBASE;
   case R_PPC64_REL14:
@@ -1291,7 +1291,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_PPC64_ADDR16_HA:
   case R_PPC64_REL16_HA:
   case R_PPC64_TPREL16_HA:
-    if (config->tocOptimize && shouldTocOptimize && ha(val) == 0)
+    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0)
       writeFromHalf16(loc, NOP);
     else {
       checkInt(loc, val + 0x8000, 32, rel);
@@ -1329,7 +1329,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     // When the high-adjusted part of a toc relocation evaluates to 0, it is
     // changed into a nop. The lo part then needs to be updated to use the
     // toc-pointer register r2, as the base register.
-    if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) {
+    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0) {
       uint32_t insn = readFromHalf16(loc);
       if (isInstructionUpdateForm(insn))
         error(getErrorLocation(loc) +
@@ -1347,7 +1347,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     uint32_t insn = readFromHalf16(loc);
     uint16_t mask = isDQFormInstruction(insn) ? 0xf : 0x3;
     checkAlignment(loc, lo(val), mask + 1, rel);
-    if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) {
+    if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0) {
       // When the high-adjusted part of a toc relocation evaluates to 0, it is
       // changed into a nop. The lo part then needs to be updated to use the toc
       // pointer register r2, as the base register.
@@ -1483,7 +1483,7 @@ RelExpr PPC64::adjustTlsExpr(RelType type, RelExpr expr) const {
 RelExpr PPC64::adjustGotPcExpr(RelType type, int64_t addend,
                                const uint8_t *loc) const {
   if ((type == R_PPC64_GOT_PCREL34 || type == R_PPC64_PCREL_OPT) &&
-      config->pcRelOptimize) {
+      ctx.arg.pcRelOptimize) {
     // It only makes sense to optimize pld since paddi means that the address
     // of the object in the GOT is required rather than the object itself.
     if ((readPrefixedInstruction(loc) & 0xfc000000) == 0xe4000000)
@@ -1726,13 +1726,13 @@ bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
   int32_t stackFrameSize = (hiImm * 65536) + loImm;
   // Check that the adjusted size doesn't overflow what we can represent with 2
   // instructions.
-  if (stackFrameSize < config->splitStackAdjustSize + INT32_MIN) {
+  if (stackFrameSize < ctx.arg.splitStackAdjustSize + INT32_MIN) {
     error(getErrorLocation(loc) + "split-stack prologue adjustment overflows");
     return false;
   }
 
   int32_t adjustedStackFrameSize =
-      stackFrameSize - config->splitStackAdjustSize;
+      stackFrameSize - ctx.arg.splitStackAdjustSize;
 
   loImm = adjustedStackFrameSize & 0xFFFF;
   hiImm = (adjustedStackFrameSize + 0x8000) >> 16;

diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index 242189050e8cab..4b02612bec870f 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -112,7 +112,7 @@ RISCV::RISCV() {
   pltRel = R_RISCV_JUMP_SLOT;
   relativeRel = R_RISCV_RELATIVE;
   iRelativeRel = R_RISCV_IRELATIVE;
-  if (config->is64) {
+  if (ctx.arg.is64) {
     symbolicRel = R_RISCV_64;
     tlsModuleIndexRel = R_RISCV_TLS_DTPMOD64;
     tlsOffsetRel = R_RISCV_TLS_DTPREL64;
@@ -138,7 +138,7 @@ RISCV::RISCV() {
 }
 
 static uint32_t getEFlags(InputFile *f) {
-  if (config->is64)
+  if (ctx.arg.is64)
     return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
   return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
 }
@@ -188,33 +188,33 @@ int64_t RISCV::getImplicitAddend(const uint8_t *buf, RelType type) const {
     return read64le(buf);
   case R_RISCV_RELATIVE:
   case R_RISCV_IRELATIVE:
-    return config->is64 ? read64le(buf) : read32le(buf);
+    return ctx.arg.is64 ? read64le(buf) : read32le(buf);
   case R_RISCV_NONE:
   case R_RISCV_JUMP_SLOT:
     // These relocations are defined as not having an implicit addend.
     return 0;
   case R_RISCV_TLSDESC:
-    return config->is64 ? read64le(buf + 8) : read32le(buf + 4);
+    return ctx.arg.is64 ? read64le(buf + 8) : read32le(buf + 4);
   }
 }
 
 void RISCV::writeGotHeader(uint8_t *buf) const {
-  if (config->is64)
+  if (ctx.arg.is64)
     write64le(buf, ctx.mainPart->dynamic->getVA());
   else
     write32le(buf, ctx.mainPart->dynamic->getVA());
 }
 
 void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const {
-  if (config->is64)
+  if (ctx.arg.is64)
     write64le(buf, ctx.in.plt->getVA());
   else
     write32le(buf, ctx.in.plt->getVA());
 }
 
 void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
-  if (config->writeAddends) {
-    if (config->is64)
+  if (ctx.arg.writeAddends) {
+    if (ctx.arg.is64)
       write64le(buf, s.getVA());
     else
       write32le(buf, s.getVA());
@@ -231,14 +231,14 @@ void RISCV::writePltHeader(uint8_t *buf) const {
   // l[wd] t0, Wordsize(t0); t0 = link_map
   // jr t3
   uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA();
-  uint32_t load = config->is64 ? LD : LW;
+  uint32_t load = ctx.arg.is64 ? LD : LW;
   write32le(buf + 0, utype(AUIPC, X_T2, hi20(offset)));
   write32le(buf + 4, rtype(SUB, X_T1, X_T1, X_T3));
   write32le(buf + 8, itype(load, X_T3, X_T2, lo12(offset)));
   write32le(buf + 12, itype(ADDI, X_T1, X_T1, -ctx.target->pltHeaderSize - 12));
   write32le(buf + 16, itype(ADDI, X_T0, X_T2, lo12(offset)));
-  write32le(buf + 20, itype(SRLI, X_T1, X_T1, config->is64 ? 1 : 2));
-  write32le(buf + 24, itype(load, X_T0, X_T0, config->wordsize));
+  write32le(buf + 20, itype(SRLI, X_T1, X_T1, ctx.arg.is64 ? 1 : 2));
+  write32le(buf + 24, itype(load, X_T0, X_T0, ctx.arg.wordsize));
   write32le(buf + 28, itype(JALR, 0, X_T3, 0));
 }
 
@@ -250,7 +250,7 @@ void RISCV::writePlt(uint8_t *buf, const Symbol &sym,
   // nop
   uint32_t offset = sym.getGotPltVA() - pltEntryAddr;
   write32le(buf + 0, utype(AUIPC, X_T3, hi20(offset)));
-  write32le(buf + 4, itype(config->is64 ? LD : LW, X_T3, X_T3, lo12(offset)));
+  write32le(buf + 4, itype(ctx.arg.is64 ? LD : LW, X_T3, X_T3, lo12(offset)));
   write32le(buf + 8, itype(JALR, X_T1, X_T3, 0));
   write32le(buf + 12, itype(ADDI, 0, 0, 0));
 }
@@ -321,7 +321,7 @@ RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s,
     return R_RELAX_HINT;
   case R_RISCV_TPREL_ADD:
   case R_RISCV_RELAX:
-    return config->relax ? R_RELAX_HINT : R_NONE;
+    return ctx.arg.relax ? R_RELAX_HINT : R_NONE;
   case R_RISCV_SET_ULEB128:
   case R_RISCV_SUB_ULEB128:
     return R_RISCV_LEB128;
@@ -333,7 +333,7 @@ RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s,
 }
 
 void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
-  const unsigned bits = config->wordsize * 8;
+  const unsigned bits = ctx.arg.wordsize * 8;
 
   switch (rel.type) {
   case R_RISCV_32:
@@ -533,7 +533,7 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     return;
   case R_RISCV_TLSDESC:
     // The addend is stored in the second word.
-    if (config->is64)
+    if (ctx.arg.is64)
       write64le(loc + 8, val);
     else
       write32le(loc + 4, val);
@@ -557,7 +557,7 @@ static void tlsdescToIe(uint8_t *loc, const Relocation &rel, uint64_t val) {
     write32le(loc, utype(AUIPC, X_A0, hi20(val))); // auipc a0,<hi20>
     break;
   case R_RISCV_TLSDESC_CALL:
-    if (config->is64)
+    if (ctx.arg.is64)
       write32le(loc, itype(LD, X_A0, X_A0, lo12(val))); // ld a0,<lo12>(a0)
     else
       write32le(loc, itype(LW, X_A0, X_A0, lo12(val))); // lw a0,<lo12>(a0)
@@ -747,7 +747,7 @@ static void relaxCall(const InputSection &sec, size_t i, uint64_t loc,
     sec.relaxAux->writes.push_back(0xa001); // c.j
     remove = 6;
   } else if (rvc && isInt<12>(displace) && rd == X_RA &&
-             !config->is64) { // RV32C only
+             !ctx.arg.is64) { // RV32C only
     sec.relaxAux->relocTypes[i] = R_RISCV_RVC_JUMP;
     sec.relaxAux->writes.push_back(0x2001); // c.jal
     remove = 6;
@@ -914,7 +914,7 @@ static bool relax(InputSection &sec) {
 // relaxation pass.
 bool RISCV::relaxOnce(int pass) const {
   llvm::TimeTraceScope timeScope("RISC-V relaxOnce");
-  if (config->relocatable)
+  if (ctx.arg.relocatable)
     return false;
 
   if (pass == 0)

diff --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp
index 3d637477e59dfd..484ffd7601ddc9 100644
--- a/lld/ELF/Arch/SystemZ.cpp
+++ b/lld/ELF/Arch/SystemZ.cpp
@@ -187,7 +187,7 @@ void SystemZ::writeGotPlt(uint8_t *buf, const Symbol &s) const {
 }
 
 void SystemZ::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
-  if (config->writeAddends)
+  if (ctx.arg.writeAddends)
     write64be(buf, s.getVA());
 }
 
@@ -417,7 +417,7 @@ void SystemZ::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
 RelExpr SystemZ::adjustGotPcExpr(RelType type, int64_t addend,
                                  const uint8_t *loc) const {
   // Only R_390_GOTENT with addend 2 can be relaxed.
-  if (!config->relax || addend != 2 || type != R_390_GOTENT)
+  if (!ctx.arg.relax || addend != 2 || type != R_390_GOTENT)
     return R_GOT_PC;
   const uint16_t op = read16be(loc - 2);
 

diff --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp
index 6fb36233343918..e02038b1689c49 100644
--- a/lld/ELF/Arch/X86.cpp
+++ b/lld/ELF/Arch/X86.cpp
@@ -187,7 +187,7 @@ RelType X86::getDynRel(RelType type) const {
 }
 
 void X86::writePltHeader(uint8_t *buf) const {
-  if (config->isPic) {
+  if (ctx.arg.isPic) {
     const uint8_t v[] = {
         0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
         0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *8(%ebx)
@@ -211,7 +211,7 @@ void X86::writePltHeader(uint8_t *buf) const {
 void X86::writePlt(uint8_t *buf, const Symbol &sym,
                    uint64_t pltEntryAddr) const {
   unsigned relOff = ctx.in.relaPlt->entsize * sym.getPltIdx();
-  if (config->isPic) {
+  if (ctx.arg.isPic) {
     const uint8_t inst[] = {
        0xff, 0xa3, 0, 0, 0, 0, // jmp *foo@GOT(%ebx)
         0x68, 0,    0, 0, 0,    // pushl $reloc_offset
@@ -538,7 +538,7 @@ void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {
 
 void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym,
                         uint64_t /*pltEntryAddr*/) const {
-  if (config->isPic) {
+  if (ctx.arg.isPic) {
     const uint8_t inst[] = {
         0xf3, 0x0f, 0x1e, 0xfb,       // endbr32
        0xff, 0xa3, 0,    0,    0, 0, // jmp *name@GOT(%ebx)
@@ -711,8 +711,8 @@ void RetpolineNoPic::writePlt(uint8_t *buf, const Symbol &sym,
 }
 
 TargetInfo *elf::getX86TargetInfo() {
-  if (config->zRetpolineplt) {
-    if (config->isPic) {
+  if (ctx.arg.zRetpolineplt) {
+    if (ctx.arg.isPic) {
       static RetpolinePic t;
       return &t;
     }
@@ -720,7 +720,7 @@ TargetInfo *elf::getX86TargetInfo() {
     return &t;
   }
 
-  if (config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT) {
+  if (ctx.arg.andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT) {
     static IntelIBT t;
     return &t;
   }

diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp
index 950bac8b655135..48f17718365e24 100644
--- a/lld/ELF/Arch/X86_64.cpp
+++ b/lld/ELF/Arch/X86_64.cpp
@@ -317,7 +317,7 @@ bool X86_64::relaxOnce(int pass) const {
   // If the max VA is under 2^31, GOTPCRELX relocations cannot overflow. In
   // -pie/-shared, the condition can be relaxed to test the max VA difference as
   // there is no R_RELAX_GOT_PC_NOPIC.
-  if (isUInt<31>(maxVA) || (isUInt<31>(maxVA - minVA) && config->isPic))
+  if (isUInt<31>(maxVA) || (isUInt<31>(maxVA - minVA) && ctx.arg.isPic))
     return false;
 
   SmallVector<InputSection *, 0> storage;
@@ -421,7 +421,7 @@ void X86_64::writeGotPlt(uint8_t *buf, const Symbol &s) const {
 
 void X86_64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
   // An x86 entry is the address of the ifunc resolver function (for -z rel).
-  if (config->writeAddends)
+  if (ctx.arg.writeAddends)
     write64le(buf, s.getVA());
 }
 
@@ -863,7 +863,7 @@ RelExpr X86_64::adjustGotPcExpr(RelType type, int64_t addend,
   // with addend != -4. Such an instruction does not load the full GOT entry, so
  // we cannot relax the relocation. E.g. movl x@GOTPCREL+4(%rip), %rax
   // (addend=0) loads the high 32 bits of the GOT entry.
-  if (!config->relax || addend != -4 ||
+  if (!ctx.arg.relax || addend != -4 ||
       (type != R_X86_64_GOTPCRELX && type != R_X86_64_REX_GOTPCRELX))
     return R_GOT_PC;
   const uint8_t op = loc[-2];
@@ -886,7 +886,7 @@ RelExpr X86_64::adjustGotPcExpr(RelType type, int64_t addend,
 
   // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
   // If PIC then no relaxation is available.
-  return config->isPic ? R_GOT_PC : R_RELAX_GOT_PC_NOPIC;
+  return ctx.arg.isPic ? R_GOT_PC : R_RELAX_GOT_PC_NOPIC;
 }
 
 // A subset of relaxations can only be applied for no-PIC. This method
@@ -973,7 +973,7 @@ static void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) {
   if (op != 0xff) {
     // We are relaxing a rip relative to an absolute, so compensate
     // for the old -4 addend.
-    assert(!config->isPic);
+    assert(!ctx.arg.isPic);
     relaxGotNoPic(loc, val + 4, op, modRm);
     return;
   }
@@ -1003,7 +1003,7 @@ static void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) {
 // B) Or a load of a stack pointer offset with an lea to r10 or r11.
 bool X86_64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                               uint8_t stOther) const {
-  if (!config->is64) {
+  if (!ctx.arg.is64) {
     error("target doesn't support split stacks");
     return false;
   }
@@ -1225,8 +1225,8 @@ void RetpolineZNow::writePlt(uint8_t *buf, const Symbol &sym,
 }
 
 static TargetInfo *getTargetInfo() {
-  if (config->zRetpolineplt) {
-    if (config->zNow) {
+  if (ctx.arg.zRetpolineplt) {
+    if (ctx.arg.zNow) {
       static RetpolineZNow t;
       return &t;
     }
@@ -1234,7 +1234,7 @@ static TargetInfo *getTargetInfo() {
     return &t;
   }
 
-  if (config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT) {
+  if (ctx.arg.andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT) {
     static IntelIBT t;
     return &t;
   }


        


More information about the llvm-commits mailing list