[lld] 2c5dd03 - [ELF] Pass Ctx & to check*

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Sun Oct 13 11:14:46 PDT 2024


Author: Fangrui Song
Date: 2024-10-13T11:14:40-07:00
New Revision: 2c5dd03f55030338139a16c7ce5b2f406531905c

URL: https://github.com/llvm/llvm-project/commit/2c5dd03f55030338139a16c7ce5b2f406531905c
DIFF: https://github.com/llvm/llvm-project/commit/2c5dd03f55030338139a16c7ce5b2f406531905c.diff

LOG: [ELF] Pass Ctx & to check*

Added: 
    

Modified: 
    lld/ELF/Arch/AArch64.cpp
    lld/ELF/Arch/AMDGPU.cpp
    lld/ELF/Arch/ARM.cpp
    lld/ELF/Arch/AVR.cpp
    lld/ELF/Arch/Hexagon.cpp
    lld/ELF/Arch/LoongArch.cpp
    lld/ELF/Arch/MSP430.cpp
    lld/ELF/Arch/Mips.cpp
    lld/ELF/Arch/PPC.cpp
    lld/ELF/Arch/PPC64.cpp
    lld/ELF/Arch/RISCV.cpp
    lld/ELF/Arch/SPARCV9.cpp
    lld/ELF/Arch/SystemZ.cpp
    lld/ELF/Arch/X86.cpp
    lld/ELF/Arch/X86_64.cpp
    lld/ELF/Target.h

Removed: 
    


################################################################################
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index ae03fde21c7993..260307ac4c3dcb 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -484,17 +484,17 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
   switch (rel.type) {
   case R_AARCH64_ABS16:
   case R_AARCH64_PREL16:
-    checkIntUInt(loc, val, 16, rel);
+    checkIntUInt(ctx, loc, val, 16, rel);
     write16(ctx, loc, val);
     break;
   case R_AARCH64_ABS32:
   case R_AARCH64_PREL32:
-    checkIntUInt(loc, val, 32, rel);
+    checkIntUInt(ctx, loc, val, 32, rel);
     write32(ctx, loc, val);
     break;
   case R_AARCH64_PLT32:
   case R_AARCH64_GOTPCREL32:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32(ctx, loc, val);
     break;
   case R_AARCH64_ABS64:
@@ -535,13 +535,13 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
   case R_AARCH64_ADR_PREL_PG_HI21:
   case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
   case R_AARCH64_TLSDESC_ADR_PAGE21:
-    checkInt(loc, val, 33, rel);
+    checkInt(ctx, loc, val, 33, rel);
     [[fallthrough]];
   case R_AARCH64_ADR_PREL_PG_HI21_NC:
     write32AArch64Addr(loc, val >> 12);
     break;
   case R_AARCH64_ADR_PREL_LO21:
-    checkInt(loc, val, 21, rel);
+    checkInt(ctx, loc, val, 21, rel);
     write32AArch64Addr(loc, val);
     break;
   case R_AARCH64_JUMP26:
@@ -555,14 +555,14 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
     write32le(loc, 0x14000000);
     [[fallthrough]];
   case R_AARCH64_CALL26:
-    checkInt(loc, val, 28, rel);
+    checkInt(ctx, loc, val, 28, rel);
     writeMaskedBits32le(loc, (val & 0x0FFFFFFC) >> 2, 0x0FFFFFFC >> 2);
     break;
   case R_AARCH64_CONDBR19:
   case R_AARCH64_LD_PREL_LO19:
   case R_AARCH64_GOT_LD_PREL19:
-    checkAlignment(loc, val, 4, rel);
-    checkInt(loc, val, 21, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
+    checkInt(ctx, loc, val, 21, rel);
     writeMaskedBits32le(loc, (val & 0x1FFFFC) << 3, 0x1FFFFC << 3);
     break;
   case R_AARCH64_LDST8_ABS_LO12_NC:
@@ -571,12 +571,12 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
     break;
   case R_AARCH64_LDST16_ABS_LO12_NC:
   case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     write32Imm12(loc, getBits(val, 1, 11));
     break;
   case R_AARCH64_LDST32_ABS_LO12_NC:
   case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
-    checkAlignment(loc, val, 4, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32Imm12(loc, getBits(val, 2, 11));
     break;
   case R_AARCH64_LDST64_ABS_LO12_NC:
@@ -584,32 +584,32 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
   case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
   case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
   case R_AARCH64_TLSDESC_LD64_LO12:
-    checkAlignment(loc, val, 8, rel);
+    checkAlignment(ctx, loc, val, 8, rel);
     write32Imm12(loc, getBits(val, 3, 11));
     break;
   case R_AARCH64_LDST128_ABS_LO12_NC:
   case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
-    checkAlignment(loc, val, 16, rel);
+    checkAlignment(ctx, loc, val, 16, rel);
     write32Imm12(loc, getBits(val, 4, 11));
     break;
   case R_AARCH64_LD64_GOTPAGE_LO15:
-    checkAlignment(loc, val, 8, rel);
+    checkAlignment(ctx, loc, val, 8, rel);
     write32Imm12(loc, getBits(val, 3, 14));
     break;
   case R_AARCH64_MOVW_UABS_G0:
-    checkUInt(loc, val, 16, rel);
+    checkUInt(ctx, loc, val, 16, rel);
     [[fallthrough]];
   case R_AARCH64_MOVW_UABS_G0_NC:
     writeMaskedBits32le(loc, (val & 0xFFFF) << 5, 0xFFFF << 5);
     break;
   case R_AARCH64_MOVW_UABS_G1:
-    checkUInt(loc, val, 32, rel);
+    checkUInt(ctx, loc, val, 32, rel);
     [[fallthrough]];
   case R_AARCH64_MOVW_UABS_G1_NC:
     writeMaskedBits32le(loc, (val & 0xFFFF0000) >> 11, 0xFFFF0000 >> 11);
     break;
   case R_AARCH64_MOVW_UABS_G2:
-    checkUInt(loc, val, 48, rel);
+    checkUInt(ctx, loc, val, 48, rel);
     [[fallthrough]];
   case R_AARCH64_MOVW_UABS_G2_NC:
     writeMaskedBits32le(loc, (val & 0xFFFF00000000) >> 27,
@@ -622,7 +622,7 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
   case R_AARCH64_MOVW_PREL_G0:
   case R_AARCH64_MOVW_SABS_G0:
   case R_AARCH64_TLSLE_MOVW_TPREL_G0:
-    checkInt(loc, val, 17, rel);
+    checkInt(ctx, loc, val, 17, rel);
     [[fallthrough]];
   case R_AARCH64_MOVW_PREL_G0_NC:
   case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
@@ -631,7 +631,7 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
   case R_AARCH64_MOVW_PREL_G1:
   case R_AARCH64_MOVW_SABS_G1:
   case R_AARCH64_TLSLE_MOVW_TPREL_G1:
-    checkInt(loc, val, 33, rel);
+    checkInt(ctx, loc, val, 33, rel);
     [[fallthrough]];
   case R_AARCH64_MOVW_PREL_G1_NC:
   case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
@@ -640,7 +640,7 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
   case R_AARCH64_MOVW_PREL_G2:
   case R_AARCH64_MOVW_SABS_G2:
   case R_AARCH64_TLSLE_MOVW_TPREL_G2:
-    checkInt(loc, val, 49, rel);
+    checkInt(ctx, loc, val, 49, rel);
     [[fallthrough]];
   case R_AARCH64_MOVW_PREL_G2_NC:
     writeSMovWImm(loc, val >> 32);
@@ -649,11 +649,11 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
     writeSMovWImm(loc, val >> 48);
     break;
   case R_AARCH64_TSTBR14:
-    checkInt(loc, val, 16, rel);
+    checkInt(ctx, loc, val, 16, rel);
     writeMaskedBits32le(loc, (val & 0xFFFC) << 3, 0xFFFC << 3);
     break;
   case R_AARCH64_TLSLE_ADD_TPREL_HI12:
-    checkUInt(loc, val, 24, rel);
+    checkUInt(ctx, loc, val, 24, rel);
     write32Imm12(loc, val >> 12);
     break;
   case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
@@ -682,7 +682,7 @@ void AArch64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
   //   movk    x0, #0x10
   //   nop
   //   nop
-  checkUInt(loc, val, 32, rel);
+  checkUInt(ctx, loc, val, 32, rel);
 
   switch (rel.type) {
   case R_AARCH64_TLSDESC_ADD_LO12:
@@ -734,7 +734,7 @@ void AArch64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
 
 void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                              uint64_t val) const {
-  checkUInt(loc, val, 32, rel);
+  checkUInt(ctx, loc, val, 32, rel);
 
   if (rel.type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
     // Generate MOVZ.

diff --git a/lld/ELF/Arch/AMDGPU.cpp b/lld/ELF/Arch/AMDGPU.cpp
index ce37d0adc5fbb8..b4bb029feb72ab 100644
--- a/lld/ELF/Arch/AMDGPU.cpp
+++ b/lld/ELF/Arch/AMDGPU.cpp
@@ -167,7 +167,7 @@ void AMDGPU::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     break;
   case R_AMDGPU_REL16: {
     int64_t simm = (static_cast<int64_t>(val) - 4) / 4;
-    checkInt(loc, simm, 16, rel);
+    checkInt(ctx, loc, simm, 16, rel);
     write16le(loc, simm);
     break;
   }

diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp
index b1685fe0723833..9bb3604ce61cc5 100644
--- a/lld/ELF/Arch/ARM.cpp
+++ b/lld/ELF/Arch/ARM.cpp
@@ -576,7 +576,7 @@ static void encodeLdrGroup(Ctx &ctx, uint8_t *loc, const Relocation &rel,
     val = -val;
   }
   uint32_t imm = getRemAndLZForGroup(group, val).first;
-  checkUInt(loc, imm, 12, rel);
+  checkUInt(ctx, loc, imm, 12, rel);
   write32(ctx, loc, (read32(ctx, loc) & 0xff7ff000) | opcode | imm);
 }
 
@@ -594,7 +594,7 @@ static void encodeLdrsGroup(Ctx &ctx, uint8_t *loc, const Relocation &rel,
     val = -val;
   }
   uint32_t imm = getRemAndLZForGroup(group, val).first;
-  checkUInt(loc, imm, 8, rel);
+  checkUInt(ctx, loc, imm, 8, rel);
   write32(ctx, loc,
           (read32(ctx, loc) & 0xff7ff0f0) | opcode | ((imm & 0xf0) << 4) |
               (imm & 0xf));
@@ -622,7 +622,7 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     write32(ctx, loc, val);
     break;
   case R_ARM_PREL31:
-    checkInt(loc, val, 31, rel);
+    checkInt(ctx, loc, val, 31, rel);
     write32(ctx, loc, (read32(ctx, loc) & 0x80000000) | (val & ~0x80000000));
     break;
   case R_ARM_CALL: {
@@ -639,7 +639,7 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
       stateChangeWarning(ctx, loc, rel.type, *rel.sym);
     if (rel.sym->isFunc() ? bit0Thumb : isBlx) {
       // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
-      checkInt(loc, val, 26, rel);
+      checkInt(ctx, loc, val, 26, rel);
       write32(ctx, loc,
               0xfa000000 |                    // opcode
                   ((val & 2) << 23) |         // H
@@ -655,23 +655,23 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_ARM_JUMP24:
   case R_ARM_PC24:
   case R_ARM_PLT32:
-    checkInt(loc, val, 26, rel);
+    checkInt(ctx, loc, val, 26, rel);
     write32(ctx, loc,
             (read32(ctx, loc) & ~0x00ffffff) | ((val >> 2) & 0x00ffffff));
     break;
   case R_ARM_THM_JUMP8:
     // We do a 9 bit check because val is right-shifted by 1 bit.
-    checkInt(loc, val, 9, rel);
+    checkInt(ctx, loc, val, 9, rel);
     write16(ctx, loc, (read32(ctx, loc) & 0xff00) | ((val >> 1) & 0x00ff));
     break;
   case R_ARM_THM_JUMP11:
     // We do a 12 bit check because val is right-shifted by 1 bit.
-    checkInt(loc, val, 12, rel);
+    checkInt(ctx, loc, val, 12, rel);
     write16(ctx, loc, (read32(ctx, loc) & 0xf800) | ((val >> 1) & 0x07ff));
     break;
   case R_ARM_THM_JUMP19:
     // Encoding T3: Val = S:J2:J1:imm6:imm11:0
-    checkInt(loc, val, 21, rel);
+    checkInt(ctx, loc, val, 21, rel);
     write16(ctx, loc,
             (read16(ctx, loc) & 0xfbc0) | // opcode cond
                 ((val >> 10) & 0x0400) |  // S
@@ -708,7 +708,7 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     if (!ctx.arg.armJ1J2BranchEncoding) {
       // Older Arm architectures do not support R_ARM_THM_JUMP24 and have
      // different encoding rules and range due to J1 and J2 always being 1.
-      checkInt(loc, val, 23, rel);
+      checkInt(ctx, loc, val, 23, rel);
       write16(ctx, loc,
               0xf000 |                     // opcode
                   ((val >> 12) & 0x07ff)); // imm11
@@ -723,7 +723,7 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     [[fallthrough]];
   case R_ARM_THM_JUMP24:
     // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
-    checkInt(loc, val, 25, rel);
+    checkInt(ctx, loc, val, 25, rel);
     write16(ctx, loc,
             0xf000 |                     // opcode
                 ((val >> 14) & 0x0400) | // S
@@ -829,7 +829,7 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
       imm = -imm;
       sub = 0x00a0;
     }
-    checkUInt(loc, imm, 12, rel);
+    checkUInt(ctx, loc, imm, 12, rel);
     write16(ctx, loc, (read16(ctx, loc) & 0xfb0f) | sub | (imm & 0x800) >> 1);
     write16(ctx, loc + 2,
             (read16(ctx, loc + 2) & 0x8f00) | (imm & 0x700) << 4 |
@@ -843,8 +843,8 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     // bottom bit to recover S + A - Pa.
     if (rel.sym->isFunc())
       val &= ~0x1;
-    checkUInt(loc, val, 10, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkUInt(ctx, loc, val, 10, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write16(ctx, loc, (read16(ctx, loc) & 0xff00) | (val & 0x3fc) >> 2);
     break;
   case R_ARM_THM_PC12: {
@@ -861,7 +861,7 @@ void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
       imm12 = -imm12;
       u = 0;
     }
-    checkUInt(loc, imm12, 12, rel);
+    checkUInt(ctx, loc, imm12, 12, rel);
     write16(ctx, loc, read16(ctx, loc) | u);
     write16(ctx, loc + 2, (read16(ctx, loc + 2) & 0xf000) | imm12);
     break;

diff --git a/lld/ELF/Arch/AVR.cpp b/lld/ELF/Arch/AVR.cpp
index 53c10b89e5db7e..4dc605c47059c1 100644
--- a/lld/ELF/Arch/AVR.cpp
+++ b/lld/ELF/Arch/AVR.cpp
@@ -119,19 +119,19 @@ bool AVR::needsThunk(RelExpr expr, RelType type, const InputFile *file,
 void AVR::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   switch (rel.type) {
   case R_AVR_8:
-    checkUInt(loc, val, 8, rel);
+    checkUInt(ctx, loc, val, 8, rel);
     *loc = val;
     break;
   case R_AVR_8_LO8:
-    checkUInt(loc, val, 32, rel);
+    checkUInt(ctx, loc, val, 32, rel);
     *loc = val & 0xff;
     break;
   case R_AVR_8_HI8:
-    checkUInt(loc, val, 32, rel);
+    checkUInt(ctx, loc, val, 32, rel);
     *loc = (val >> 8) & 0xff;
     break;
   case R_AVR_8_HLO8:
-    checkUInt(loc, val, 32, rel);
+    checkUInt(ctx, loc, val, 32, rel);
     *loc = (val >> 16) & 0xff;
     break;
   case R_AVR_16:
@@ -141,17 +141,17 @@ void AVR::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     write16le(loc, val & 0xffff);
     break;
   case R_AVR_16_PM:
-    checkAlignment(loc, val, 2, rel);
-    checkUInt(loc, val >> 1, 16, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
+    checkUInt(ctx, loc, val >> 1, 16, rel);
     write16le(loc, val >> 1);
     break;
   case R_AVR_32:
-    checkUInt(loc, val, 32, rel);
+    checkUInt(ctx, loc, val, 32, rel);
     write32le(loc, val);
     break;
 
   case R_AVR_LDI:
-    checkUInt(loc, val, 8, rel);
+    checkUInt(ctx, loc, val, 8, rel);
     writeLDI(loc, val & 0xff);
     break;
 
@@ -181,39 +181,39 @@ void AVR::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     break;
 
   case R_AVR_LO8_LDI_GS:
-    checkUInt(loc, val, 17, rel);
+    checkUInt(ctx, loc, val, 17, rel);
     [[fallthrough]];
   case R_AVR_LO8_LDI_PM:
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     writeLDI(loc, (val >> 1) & 0xff);
     break;
   case R_AVR_HI8_LDI_GS:
-    checkUInt(loc, val, 17, rel);
+    checkUInt(ctx, loc, val, 17, rel);
     [[fallthrough]];
   case R_AVR_HI8_LDI_PM:
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     writeLDI(loc, (val >> 9) & 0xff);
     break;
   case R_AVR_HH8_LDI_PM:
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     writeLDI(loc, (val >> 17) & 0xff);
     break;
 
   case R_AVR_LO8_LDI_PM_NEG:
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     writeLDI(loc, (-val >> 1) & 0xff);
     break;
   case R_AVR_HI8_LDI_PM_NEG:
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     writeLDI(loc, (-val >> 9) & 0xff);
     break;
   case R_AVR_HH8_LDI_PM_NEG:
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     writeLDI(loc, (-val >> 17) & 0xff);
     break;
 
   case R_AVR_LDS_STS_16: {
-    checkUInt(loc, val, 7, rel);
+    checkUInt(ctx, loc, val, 7, rel);
     const uint16_t hi = val >> 4;
     const uint16_t lo = val & 0xf;
     write16le(loc, (read16le(loc) & 0xf8f0) | ((hi << 8) | lo));
@@ -221,41 +221,41 @@ void AVR::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   }
 
   case R_AVR_PORT5:
-    checkUInt(loc, val, 5, rel);
+    checkUInt(ctx, loc, val, 5, rel);
     write16le(loc, (read16le(loc) & 0xff07) | (val << 3));
     break;
   case R_AVR_PORT6:
-    checkUInt(loc, val, 6, rel);
+    checkUInt(ctx, loc, val, 6, rel);
     write16le(loc, (read16le(loc) & 0xf9f0) | (val & 0x30) << 5 | (val & 0x0f));
     break;
 
   // Since every jump destination is word aligned we gain an extra bit
   case R_AVR_7_PCREL: {
-    checkInt(loc, val - 2, 8, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val - 2, 8, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     const uint16_t target = (val - 2) >> 1;
     write16le(loc, (read16le(loc) & 0xfc07) | ((target & 0x7f) << 3));
     break;
   }
   case R_AVR_13_PCREL: {
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     const uint16_t target = (val - 2) >> 1;
     write16le(loc, (read16le(loc) & 0xf000) | (target & 0xfff));
     break;
   }
 
   case R_AVR_6:
-    checkInt(loc, val, 6, rel);
+    checkInt(ctx, loc, val, 6, rel);
     write16le(loc, (read16le(loc) & 0xd3f8) | (val & 0x20) << 8 |
                        (val & 0x18) << 7 | (val & 0x07));
     break;
   case R_AVR_6_ADIW:
-    checkInt(loc, val, 6, rel);
+    checkInt(ctx, loc, val, 6, rel);
     write16le(loc, (read16le(loc) & 0xff30) | (val & 0x30) << 2 | (val & 0x0F));
     break;
 
   case R_AVR_CALL: {
-    checkAlignment(loc, val, 2, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     uint16_t hi = val >> 17;
     uint16_t lo = val >> 1;
     write16le(loc, read16le(loc) | ((hi >> 1) << 4) | (hi & 1));

diff --git a/lld/ELF/Arch/Hexagon.cpp b/lld/ELF/Arch/Hexagon.cpp
index 4f4d466ca75040..80fcb3b747d1ea 100644
--- a/lld/ELF/Arch/Hexagon.cpp
+++ b/lld/ELF/Arch/Hexagon.cpp
@@ -309,18 +309,18 @@ void Hexagon::relocate(uint8_t *loc, const Relocation &rel,
     or32le(loc, applyMask(0x0fff3fff, val >> 6));
     break;
   case R_HEX_B9_PCREL:
-    checkInt(loc, val, 11, rel);
+    checkInt(ctx, loc, val, 11, rel);
     or32le(loc, applyMask(0x003000fe, val >> 2));
     break;
   case R_HEX_B9_PCREL_X:
     or32le(loc, applyMask(0x003000fe, val & 0x3f));
     break;
   case R_HEX_B13_PCREL:
-    checkInt(loc, val, 15, rel);
+    checkInt(ctx, loc, val, 15, rel);
     or32le(loc, applyMask(0x00202ffe, val >> 2));
     break;
   case R_HEX_B15_PCREL:
-    checkInt(loc, val, 17, rel);
+    checkInt(ctx, loc, val, 17, rel);
     or32le(loc, applyMask(0x00df20fe, val >> 2));
     break;
   case R_HEX_B15_PCREL_X:
@@ -329,7 +329,7 @@ void Hexagon::relocate(uint8_t *loc, const Relocation &rel,
   case R_HEX_B22_PCREL:
   case R_HEX_GD_PLT_B22_PCREL:
   case R_HEX_PLT_B22_PCREL:
-    checkInt(loc, val, 22, rel);
+    checkInt(ctx, loc, val, 22, rel);
     or32le(loc, applyMask(0x1ff3ffe, val >> 2));
     break;
   case R_HEX_B22_PCREL_X:

diff --git a/lld/ELF/Arch/LoongArch.cpp b/lld/ELF/Arch/LoongArch.cpp
index 638e0cfd02414f..5923cda2298b4e 100644
--- a/lld/ELF/Arch/LoongArch.cpp
+++ b/lld/ELF/Arch/LoongArch.cpp
@@ -552,7 +552,7 @@ void LoongArch::relocate(uint8_t *loc, const Relocation &rel,
                          uint64_t val) const {
   switch (rel.type) {
   case R_LARCH_32_PCREL:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     [[fallthrough]];
   case R_LARCH_32:
   case R_LARCH_TLS_DTPREL32:
@@ -569,26 +569,26 @@ void LoongArch::relocate(uint8_t *loc, const Relocation &rel,
   case R_LARCH_TLS_LD_PCREL20_S2:
   case R_LARCH_TLS_GD_PCREL20_S2:
   case R_LARCH_TLS_DESC_PCREL20_S2:
-    checkInt(loc, val, 22, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkInt(ctx, loc, val, 22, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32le(loc, setJ20(read32le(loc), val >> 2));
     return;
 
   case R_LARCH_B16:
-    checkInt(loc, val, 18, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkInt(ctx, loc, val, 18, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32le(loc, setK16(read32le(loc), val >> 2));
     return;
 
   case R_LARCH_B21:
-    checkInt(loc, val, 23, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkInt(ctx, loc, val, 23, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32le(loc, setD5k16(read32le(loc), val >> 2));
     return;
 
   case R_LARCH_B26:
-    checkInt(loc, val, 28, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkInt(ctx, loc, val, 28, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32le(loc, setD10k16(read32le(loc), val >> 2));
     return;
 
@@ -600,7 +600,7 @@ void LoongArch::relocate(uint8_t *loc, const Relocation &rel,
     if (((int64_t)val + 0x20000) != llvm::SignExtend64(val + 0x20000, 38))
       reportRangeError(ctx, loc, rel, Twine(val), llvm::minIntN(38) - 0x20000,
                        llvm::maxIntN(38) - 0x20000);
-    checkAlignment(loc, val, 4, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     // Since jirl performs sign extension on the offset immediate, adds (1<<17)
     // to original val to get the correct hi20.
     uint32_t hi20 = extractBits(val + (1 << 17), 37, 18);
@@ -620,7 +620,7 @@ void LoongArch::relocate(uint8_t *loc, const Relocation &rel,
     // In this case, process like an R_LARCH_B16, but without overflow checking
     // and only taking the value's lowest 12 bits.
     if (isJirl(read32le(loc))) {
-      checkAlignment(loc, val, 4, rel);
+      checkAlignment(ctx, loc, val, 4, rel);
       val = SignExtend64<12>(val);
       write32le(loc, setK16(read32le(loc), val >> 2));
       return;

diff --git a/lld/ELF/Arch/MSP430.cpp b/lld/ELF/Arch/MSP430.cpp
index b3aab52e179c97..5d48518c53d8df 100644
--- a/lld/ELF/Arch/MSP430.cpp
+++ b/lld/ELF/Arch/MSP430.cpp
@@ -62,23 +62,23 @@ RelExpr MSP430::getRelExpr(RelType type, const Symbol &s,
 void MSP430::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   switch (rel.type) {
   case R_MSP430_8:
-    checkIntUInt(loc, val, 8, rel);
+    checkIntUInt(ctx, loc, val, 8, rel);
     *loc = val;
     break;
   case R_MSP430_16:
   case R_MSP430_16_PCREL:
   case R_MSP430_16_BYTE:
   case R_MSP430_16_PCREL_BYTE:
-    checkIntUInt(loc, val, 16, rel);
+    checkIntUInt(ctx, loc, val, 16, rel);
     write16le(loc, val);
     break;
   case R_MSP430_32:
-    checkIntUInt(loc, val, 32, rel);
+    checkIntUInt(ctx, loc, val, 32, rel);
     write32le(loc, val);
     break;
   case R_MSP430_10_PCREL: {
     int16_t offset = ((int16_t)val >> 1) - 1;
-    checkInt(loc, offset, 10, rel);
+    checkInt(ctx, loc, offset, 10, rel);
     write16le(loc, (read16le(loc) & 0xFC00) | (offset & 0x3FF));
     break;
   }

diff --git a/lld/ELF/Arch/Mips.cpp b/lld/ELF/Arch/Mips.cpp
index 3fb18a892d2d7b..1d3000793ca268 100644
--- a/lld/ELF/Arch/Mips.cpp
+++ b/lld/ELF/Arch/Mips.cpp
@@ -611,7 +611,7 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
     if (ctx.arg.relocatable) {
       writeValue(ctx, loc, val + 0x8000, 16, 16);
     } else {
-      checkInt(loc, val, 16, rel);
+      checkInt(ctx, loc, val, 16, rel);
       writeValue(ctx, loc, val, 16, 0);
     }
     break;
@@ -619,7 +619,7 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
     if (ctx.arg.relocatable) {
       writeShuffle<e>(ctx, loc, val + 0x8000, 16, 16);
     } else {
-      checkInt(loc, val, 16, rel);
+      checkInt(ctx, loc, val, 16, rel);
       writeShuffle<e>(ctx, loc, val, 16, 0);
     }
     break;
@@ -630,7 +630,7 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
   case R_MIPS_TLS_GD:
   case R_MIPS_TLS_GOTTPREL:
   case R_MIPS_TLS_LDM:
-    checkInt(loc, val, 16, rel);
+    checkInt(ctx, loc, val, 16, rel);
     [[fallthrough]];
   case R_MIPS_CALL_LO16:
   case R_MIPS_GOT_LO16:
@@ -644,7 +644,7 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
   case R_MICROMIPS_GPREL16:
   case R_MICROMIPS_TLS_GD:
   case R_MICROMIPS_TLS_LDM:
-    checkInt(loc, val, 16, rel);
+    checkInt(ctx, loc, val, 16, rel);
     writeShuffle<e>(ctx, loc, val, 16, 0);
     break;
   case R_MICROMIPS_CALL16:
@@ -656,7 +656,7 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
     writeShuffle<e>(ctx, loc, val, 16, 0);
     break;
   case R_MICROMIPS_GPREL7_S2:
-    checkInt(loc, val, 7, rel);
+    checkInt(ctx, loc, val, 7, rel);
     writeShuffle<e>(ctx, loc, val, 7, 2);
     break;
   case R_MIPS_CALL_HI16:
@@ -699,23 +699,23 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
     // Ignore this optimization relocation for now
     break;
   case R_MIPS_PC16:
-    checkAlignment(loc, val, 4, rel);
-    checkInt(loc, val, 18, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
+    checkInt(ctx, loc, val, 18, rel);
     writeValue(ctx, loc, val, 16, 2);
     break;
   case R_MIPS_PC19_S2:
-    checkAlignment(loc, val, 4, rel);
-    checkInt(loc, val, 21, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
+    checkInt(ctx, loc, val, 21, rel);
     writeValue(ctx, loc, val, 19, 2);
     break;
   case R_MIPS_PC21_S2:
-    checkAlignment(loc, val, 4, rel);
-    checkInt(loc, val, 23, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
+    checkInt(ctx, loc, val, 23, rel);
     writeValue(ctx, loc, val, 21, 2);
     break;
   case R_MIPS_PC26_S2:
-    checkAlignment(loc, val, 4, rel);
-    checkInt(loc, val, 28, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
+    checkInt(ctx, loc, val, 28, rel);
     writeValue(ctx, loc, val, 26, 2);
     break;
   case R_MIPS_PC32:
@@ -723,35 +723,35 @@ void MIPS<ELFT>::relocate(uint8_t *loc, const Relocation &rel,
     break;
   case R_MICROMIPS_26_S1:
   case R_MICROMIPS_PC26_S1:
-    checkInt(loc, val, 27, rel);
+    checkInt(ctx, loc, val, 27, rel);
     writeShuffle<e>(ctx, loc, val, 26, 1);
     break;
   case R_MICROMIPS_PC7_S1:
-    checkInt(loc, val, 8, rel);
+    checkInt(ctx, loc, val, 8, rel);
     writeMicroRelocation16<e>(ctx, loc, val, 7, 1);
     break;
   case R_MICROMIPS_PC10_S1:
-    checkInt(loc, val, 11, rel);
+    checkInt(ctx, loc, val, 11, rel);
     writeMicroRelocation16<e>(ctx, loc, val, 10, 1);
     break;
   case R_MICROMIPS_PC16_S1:
-    checkInt(loc, val, 17, rel);
+    checkInt(ctx, loc, val, 17, rel);
     writeShuffle<e>(ctx, loc, val, 16, 1);
     break;
   case R_MICROMIPS_PC18_S3:
-    checkInt(loc, val, 21, rel);
+    checkInt(ctx, loc, val, 21, rel);
     writeShuffle<e>(ctx, loc, val, 18, 3);
     break;
   case R_MICROMIPS_PC19_S2:
-    checkInt(loc, val, 21, rel);
+    checkInt(ctx, loc, val, 21, rel);
     writeShuffle<e>(ctx, loc, val, 19, 2);
     break;
   case R_MICROMIPS_PC21_S1:
-    checkInt(loc, val, 22, rel);
+    checkInt(ctx, loc, val, 22, rel);
     writeShuffle<e>(ctx, loc, val, 21, 1);
     break;
   case R_MICROMIPS_PC23_S2:
-    checkInt(loc, val, 25, rel);
+    checkInt(ctx, loc, val, 25, rel);
     writeShuffle<e>(ctx, loc, val, 23, 2);
     break;
   default:

diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp
index e9bd3ecdbdd523..3af4101fff606f 100644
--- a/lld/ELF/Arch/PPC.cpp
+++ b/lld/ELF/Arch/PPC.cpp
@@ -325,7 +325,7 @@ void PPC::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   std::tie(newType, val) = fromDTPREL(rel.type, val);
   switch (newType) {
   case R_PPC_ADDR16:
-    checkIntUInt(loc, val, 16, rel);
+    checkIntUInt(ctx, loc, val, 16, rel);
     write16(ctx, loc, val);
     break;
   case R_PPC_GOT16:
@@ -333,7 +333,7 @@ void PPC::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_PPC_GOT_TLSLD16:
   case R_PPC_GOT_TPREL16:
   case R_PPC_TPREL16:
-    checkInt(loc, val, 16, rel);
+    checkInt(ctx, loc, val, 16, rel);
     write16(ctx, loc, val);
     break;
   case R_PPC_ADDR16_HA:
@@ -369,8 +369,8 @@ void PPC::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     break;
   case R_PPC_REL14: {
     uint32_t mask = 0x0000FFFC;
-    checkInt(loc, val, 16, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkInt(ctx, loc, val, 16, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32(ctx, loc, (read32(ctx, loc) & ~mask) | (val & mask));
     break;
   }
@@ -379,8 +379,8 @@ void PPC::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_PPC_LOCAL24PC:
   case R_PPC_PLTREL24: {
     uint32_t mask = 0x03FFFFFC;
-    checkInt(loc, val, 26, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkInt(ctx, loc, val, 26, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32(ctx, loc, (read32(ctx, loc) & ~mask) | (val & mask));
     break;
   }

diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index 1c4f1c64390e33..d937492fe440d7 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -1267,27 +1267,27 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
 
   switch (type) {
   case R_PPC64_ADDR14: {
-    checkAlignment(loc, val, 4, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     // Preserve the AA/LK bits in the branch instruction
     uint8_t aalk = loc[3];
     write16(ctx, loc + 2, (aalk & 3) | (val & 0xfffc));
     break;
   }
   case R_PPC64_ADDR16:
-    checkIntUInt(loc, val, 16, rel);
+    checkIntUInt(ctx, loc, val, 16, rel);
     write16(ctx, loc, val);
     break;
   case R_PPC64_ADDR32:
-    checkIntUInt(loc, val, 32, rel);
+    checkIntUInt(ctx, loc, val, 32, rel);
     write32(ctx, loc, val);
     break;
   case R_PPC64_ADDR16_DS:
   case R_PPC64_TPREL16_DS: {
-    checkInt(loc, val, 16, rel);
+    checkInt(ctx, loc, val, 16, rel);
     // DQ-form instructions use bits 28-31 as part of the instruction encoding
     // DS-form instructions only use bits 30-31.
     uint16_t mask = isDQFormInstruction(readFromHalf16(ctx, loc)) ? 0xf : 0x3;
-    checkAlignment(loc, lo(val), mask + 1, rel);
+    checkAlignment(ctx, loc, lo(val), mask + 1, rel);
     write16(ctx, loc, (read16(ctx, loc) & mask) | lo(val));
   } break;
   case R_PPC64_ADDR16_HA:
@@ -1296,14 +1296,14 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0)
       writeFromHalf16(ctx, loc, NOP);
     else {
-      checkInt(loc, val + 0x8000, 32, rel);
+      checkInt(ctx, loc, val + 0x8000, 32, rel);
       write16(ctx, loc, ha(val));
     }
     break;
   case R_PPC64_ADDR16_HI:
   case R_PPC64_REL16_HI:
   case R_PPC64_TPREL16_HI:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write16(ctx, loc, hi(val));
     break;
   case R_PPC64_ADDR16_HIGH:
@@ -1347,7 +1347,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     // DS-form instructions only use bits 30-31.
     uint32_t insn = readFromHalf16(ctx, loc);
     uint16_t mask = isDQFormInstruction(insn) ? 0xf : 0x3;
-    checkAlignment(loc, lo(val), mask + 1, rel);
+    checkAlignment(ctx, loc, lo(val), mask + 1, rel);
     if (ctx.arg.tocOptimize && shouldTocOptimize && ha(val) == 0) {
       // When the high-adjusted part of a toc relocation evaluates to 0, it is
       // changed into a nop. The lo part then needs to be updated to use the toc
@@ -1363,11 +1363,11 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     }
   } break;
   case R_PPC64_TPREL16:
-    checkInt(loc, val, 16, rel);
+    checkInt(ctx, loc, val, 16, rel);
     write16(ctx, loc, val);
     break;
   case R_PPC64_REL32:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32(ctx, loc, val);
     break;
   case R_PPC64_ADDR64:
@@ -1377,16 +1377,16 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     break;
   case R_PPC64_REL14: {
     uint32_t mask = 0x0000FFFC;
-    checkInt(loc, val, 16, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkInt(ctx, loc, val, 16, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32(ctx, loc, (read32(ctx, loc) & ~mask) | (val & mask));
     break;
   }
   case R_PPC64_REL24:
   case R_PPC64_REL24_NOTOC: {
     uint32_t mask = 0x03FFFFFC;
-    checkInt(loc, val, 26, rel);
-    checkAlignment(loc, val, 4, rel);
+    checkInt(ctx, loc, val, 26, rel);
+    checkAlignment(ctx, loc, val, 4, rel);
     write32(ctx, loc, (read32(ctx, loc) & ~mask) | (val & mask));
     break;
   }
@@ -1408,7 +1408,7 @@ void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     const uint64_t si0Mask = 0x00000003ffff0000;
     const uint64_t si1Mask = 0x000000000000ffff;
     const uint64_t fullMask = 0x0003ffff0000ffff;
-    checkInt(loc, val, 34, rel);
+    checkInt(ctx, loc, val, 34, rel);
 
     uint64_t instr = readPrefixedInst(ctx, loc) & ~fullMask;
     writePrefixedInst(ctx, loc,

diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index cbbda83c7f848e..1ae016e4de01ee 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -343,8 +343,8 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     return;
 
   case R_RISCV_RVC_BRANCH: {
-    checkInt(loc, val, 9, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val, 9, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     uint16_t insn = read16le(loc) & 0xE383;
     uint16_t imm8 = extractBits(val, 8, 8) << 12;
     uint16_t imm4_3 = extractBits(val, 4, 3) << 10;
@@ -358,8 +358,8 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   }
 
   case R_RISCV_RVC_JUMP: {
-    checkInt(loc, val, 12, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val, 12, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     uint16_t insn = read16le(loc) & 0xE003;
     uint16_t imm11 = extractBits(val, 11, 11) << 12;
     uint16_t imm4 = extractBits(val, 4, 4) << 11;
@@ -377,7 +377,7 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
 
   case R_RISCV_RVC_LUI: {
     int64_t imm = SignExtend64(val + 0x800, bits) >> 12;
-    checkInt(loc, imm, 6, rel);
+    checkInt(ctx, loc, imm, 6, rel);
     if (imm == 0) { // `c.lui rd, 0` is illegal, convert to `c.li rd, 0`
       write16le(loc, (read16le(loc) & 0x0F83) | 0x4000);
     } else {
@@ -389,8 +389,8 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   }
 
   case R_RISCV_JAL: {
-    checkInt(loc, val, 21, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val, 21, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
 
     uint32_t insn = read32le(loc) & 0xFFF;
     uint32_t imm20 = extractBits(val, 20, 20) << 31;
@@ -404,8 +404,8 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   }
 
   case R_RISCV_BRANCH: {
-    checkInt(loc, val, 13, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val, 13, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
 
     uint32_t insn = read32le(loc) & 0x1FFF07F;
     uint32_t imm12 = extractBits(val, 12, 12) << 31;
@@ -422,7 +422,7 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_RISCV_CALL:
   case R_RISCV_CALL_PLT: {
     int64_t hi = SignExtend64(val + 0x800, bits) >> 12;
-    checkInt(loc, hi, 20, rel);
+    checkInt(ctx, loc, hi, 20, rel);
     if (isInt<20>(hi)) {
       relocateNoSym(loc, R_RISCV_PCREL_HI20, val);
       relocateNoSym(loc + 4, R_RISCV_PCREL_LO12_I, val);
@@ -438,7 +438,7 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_RISCV_TPREL_HI20:
   case R_RISCV_HI20: {
     uint64_t hi = val + 0x800;
-    checkInt(loc, SignExtend64(hi, bits) >> 12, 20, rel);
+    checkInt(ctx, loc, SignExtend64(hi, bits) >> 12, 20, rel);
     write32le(loc, (read32le(loc) & 0xFFF) | (hi & 0xFFFFF000));
     return;
   }
@@ -467,7 +467,7 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case INTERNAL_R_RISCV_GPREL_S: {
     Defined *gp = ctx.sym.riscvGlobalPointer;
     int64_t displace = SignExtend64(val - gp->getVA(), bits);
-    checkInt(loc, displace, 12, rel);
+    checkInt(ctx, loc, displace, 12, rel);
     uint32_t insn = (read32le(loc) & ~(31 << 15)) | (X_GP << 15);
     if (rel.type == INTERNAL_R_RISCV_GPREL_I)
       insn = setLO12_I(insn, displace);
@@ -517,7 +517,7 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_RISCV_32_PCREL:
   case R_RISCV_PLT32:
   case R_RISCV_GOT32_PCREL:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32le(loc, val);
     return;
 

diff --git a/lld/ELF/Arch/SPARCV9.cpp b/lld/ELF/Arch/SPARCV9.cpp
index bc9724c3ba342a..ef71014e12fa97 100644
--- a/lld/ELF/Arch/SPARCV9.cpp
+++ b/lld/ELF/Arch/SPARCV9.cpp
@@ -90,23 +90,23 @@ void SPARCV9::relocate(uint8_t *loc, const Relocation &rel,
   case R_SPARC_32:
   case R_SPARC_UA32:
     // V-word32
-    checkUInt(loc, val, 32, rel);
+    checkUInt(ctx, loc, val, 32, rel);
     write32be(loc, val);
     break;
   case R_SPARC_DISP32:
     // V-disp32
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32be(loc, val);
     break;
   case R_SPARC_WDISP30:
   case R_SPARC_WPLT30:
     // V-disp30
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32be(loc, (read32be(loc) & ~0x3fffffff) | ((val >> 2) & 0x3fffffff));
     break;
   case R_SPARC_22:
     // V-imm22
-    checkUInt(loc, val, 22, rel);
+    checkUInt(ctx, loc, val, 22, rel);
     write32be(loc, (read32be(loc) & ~0x003fffff) | (val & 0x003fffff));
     break;
   case R_SPARC_GOT22:
@@ -117,12 +117,12 @@ void SPARCV9::relocate(uint8_t *loc, const Relocation &rel,
     break;
   case R_SPARC_HI22:
     // V-imm22
-    checkUInt(loc, val >> 10, 22, rel);
+    checkUInt(ctx, loc, val >> 10, 22, rel);
     write32be(loc, (read32be(loc) & ~0x003fffff) | ((val >> 10) & 0x003fffff));
     break;
   case R_SPARC_WDISP19:
     // V-disp19
-    checkInt(loc, val, 21, rel);
+    checkInt(ctx, loc, val, 21, rel);
     write32be(loc, (read32be(loc) & ~0x0007ffff) | ((val >> 2) & 0x0007ffff));
     break;
   case R_SPARC_GOT10:
@@ -141,7 +141,7 @@ void SPARCV9::relocate(uint8_t *loc, const Relocation &rel,
     break;
   case R_SPARC_HH22:
     // V-imm22
-    checkUInt(loc, val >> 42, 22, rel);
+    checkUInt(ctx, loc, val >> 42, 22, rel);
     write32be(loc, (read32be(loc) & ~0x003fffff) | ((val >> 42) & 0x003fffff));
     break;
   case R_SPARC_HM10:
@@ -150,7 +150,7 @@ void SPARCV9::relocate(uint8_t *loc, const Relocation &rel,
     break;
   case R_SPARC_H44:
     // V-imm22
-    checkUInt(loc, val >> 22, 22, rel);
+    checkUInt(ctx, loc, val >> 22, 22, rel);
     write32be(loc, (read32be(loc) & ~0x003fffff) | ((val >> 22) & 0x003fffff));
     break;
   case R_SPARC_M44:

diff --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp
index 1bc3b60af5c44d..584379638ad981 100644
--- a/lld/ELF/Arch/SystemZ.cpp
+++ b/lld/ELF/Arch/SystemZ.cpp
@@ -493,20 +493,20 @@ void SystemZ::relocate(uint8_t *loc, const Relocation &rel,
   }
   switch (rel.type) {
   case R_390_8:
-    checkIntUInt(loc, val, 8, rel);
+    checkIntUInt(ctx, loc, val, 8, rel);
     *loc = val;
     break;
   case R_390_12:
   case R_390_GOT12:
   case R_390_GOTPLT12:
   case R_390_TLS_GOTIE12:
-    checkUInt(loc, val, 12, rel);
+    checkUInt(ctx, loc, val, 12, rel);
     write16be(loc, (read16be(loc) & 0xF000) | val);
     break;
   case R_390_PC12DBL:
   case R_390_PLT12DBL:
-    checkInt(loc, val, 13, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val, 13, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     write16be(loc, (read16be(loc) & 0xF000) | ((val >> 1) & 0x0FFF));
     break;
   case R_390_16:
@@ -514,31 +514,31 @@ void SystemZ::relocate(uint8_t *loc, const Relocation &rel,
   case R_390_GOTPLT16:
   case R_390_GOTOFF16:
   case R_390_PLTOFF16:
-    checkIntUInt(loc, val, 16, rel);
+    checkIntUInt(ctx, loc, val, 16, rel);
     write16be(loc, val);
     break;
   case R_390_PC16:
-    checkInt(loc, val, 16, rel);
+    checkInt(ctx, loc, val, 16, rel);
     write16be(loc, val);
     break;
   case R_390_PC16DBL:
   case R_390_PLT16DBL:
-    checkInt(loc, val, 17, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val, 17, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     write16be(loc, val >> 1);
     break;
   case R_390_20:
   case R_390_GOT20:
   case R_390_GOTPLT20:
   case R_390_TLS_GOTIE20:
-    checkInt(loc, val, 20, rel);
+    checkInt(ctx, loc, val, 20, rel);
     write32be(loc, (read32be(loc) & 0xF00000FF) | ((val & 0xFFF) << 16) |
                        ((val & 0xFF000) >> 4));
     break;
   case R_390_PC24DBL:
   case R_390_PLT24DBL:
-    checkInt(loc, val, 25, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val, 25, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     loc[0] = val >> 17;
     loc[1] = val >> 9;
     loc[2] = val >> 1;
@@ -554,12 +554,12 @@ void SystemZ::relocate(uint8_t *loc, const Relocation &rel,
   case R_390_TLS_LDM32:
   case R_390_TLS_LDO32:
   case R_390_TLS_LE32:
-    checkIntUInt(loc, val, 32, rel);
+    checkIntUInt(ctx, loc, val, 32, rel);
     write32be(loc, val);
     break;
   case R_390_PC32:
   case R_390_PLT32:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32be(loc, val);
     break;
   case R_390_PC32DBL:
@@ -568,8 +568,8 @@ void SystemZ::relocate(uint8_t *loc, const Relocation &rel,
   case R_390_GOTENT:
   case R_390_GOTPLTENT:
   case R_390_TLS_IEENT:
-    checkInt(loc, val, 33, rel);
-    checkAlignment(loc, val, 2, rel);
+    checkInt(ctx, loc, val, 33, rel);
+    checkAlignment(ctx, loc, val, 2, rel);
     write32be(loc, val >> 1);
     break;
   case R_390_64:

diff --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp
index 4e574a520f1ff1..58199cdb99a284 100644
--- a/lld/ELF/Arch/X86.cpp
+++ b/lld/ELF/Arch/X86.cpp
@@ -292,15 +292,15 @@ void X86::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     // R_386_{PC,}{8,16} are not part of the i386 psABI, but they are
     // being used for some 16-bit programs such as boot loaders, so
     // we want to support them.
-    checkIntUInt(loc, val, 8, rel);
+    checkIntUInt(ctx, loc, val, 8, rel);
     *loc = val;
     break;
   case R_386_PC8:
-    checkInt(loc, val, 8, rel);
+    checkInt(ctx, loc, val, 8, rel);
     *loc = val;
     break;
   case R_386_16:
-    checkIntUInt(loc, val, 16, rel);
+    checkIntUInt(ctx, loc, val, 16, rel);
     write16le(loc, val);
     break;
   case R_386_PC16:
@@ -314,7 +314,7 @@ void X86::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     // current location subtracted from it.
     // We just check that Val fits in 17 bits. This misses some cases, but
     // should have no false positives.
-    checkInt(loc, val, 17, rel);
+    checkInt(ctx, loc, val, 17, rel);
     write16le(loc, val);
     break;
   case R_386_32:
@@ -338,7 +338,7 @@ void X86::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_386_TLS_LE_32:
   case R_386_TLS_TPOFF:
   case R_386_TLS_TPOFF32:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32le(loc, val);
     break;
   case R_386_TLS_DESC:

diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp
index 30114a92b51e1e..df2983f2022818 100644
--- a/lld/ELF/Arch/X86_64.cpp
+++ b/lld/ELF/Arch/X86_64.cpp
@@ -777,23 +777,23 @@ static void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val);
 void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   switch (rel.type) {
   case R_X86_64_8:
-    checkIntUInt(loc, val, 8, rel);
+    checkIntUInt(ctx, loc, val, 8, rel);
     *loc = val;
     break;
   case R_X86_64_PC8:
-    checkInt(loc, val, 8, rel);
+    checkInt(ctx, loc, val, 8, rel);
     *loc = val;
     break;
   case R_X86_64_16:
-    checkIntUInt(loc, val, 16, rel);
+    checkIntUInt(ctx, loc, val, 16, rel);
     write16le(loc, val);
     break;
   case R_X86_64_PC16:
-    checkInt(loc, val, 16, rel);
+    checkInt(ctx, loc, val, 16, rel);
     write16le(loc, val);
     break;
   case R_X86_64_32:
-    checkUInt(loc, val, 32, rel);
+    checkUInt(ctx, loc, val, 32, rel);
     write32le(loc, val);
     break;
   case R_X86_64_32S:
@@ -804,7 +804,7 @@ void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
   case R_X86_64_PLT32:
   case R_X86_64_DTPOFF32:
   case R_X86_64_SIZE32:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32le(loc, val);
     break;
   case R_X86_64_64:
@@ -824,7 +824,7 @@ void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     if (rel.expr != R_GOT_PC) {
       relaxGot(loc, rel, val);
     } else {
-      checkInt(loc, val, 32, rel);
+      checkInt(ctx, loc, val, 32, rel);
       write32le(loc, val);
     }
     break;
@@ -836,7 +836,7 @@ void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     } else if (rel.expr == R_RELAX_TLS_GD_TO_IE) {
       relaxTlsGdToIe(loc, rel, val);
     } else {
-      checkInt(loc, val, 32, rel);
+      checkInt(ctx, loc, val, 32, rel);
       write32le(loc, val);
     }
     break;
@@ -844,7 +844,7 @@ void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     if (rel.expr == R_RELAX_TLS_LD_TO_LE) {
       relaxTlsLdToLe(loc, rel, val);
     } else {
-      checkInt(loc, val, 32, rel);
+      checkInt(ctx, loc, val, 32, rel);
       write32le(loc, val);
     }
     break;
@@ -852,12 +852,12 @@ void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     if (rel.expr == R_RELAX_TLS_IE_TO_LE) {
       relaxTlsIeToLe(loc, rel, val);
     } else {
-      checkInt(loc, val, 32, rel);
+      checkInt(ctx, loc, val, 32, rel);
       write32le(loc, val);
     }
     break;
   case R_X86_64_TPOFF32:
-    checkInt(loc, val, 32, rel);
+    checkInt(ctx, loc, val, 32, rel);
     write32le(loc, val);
     break;
 

diff --git a/lld/ELF/Target.h b/lld/ELF/Target.h
index bdf5bae80f42b2..27986b0a127e8e 100644
--- a/lld/ELF/Target.h
+++ b/lld/ELF/Target.h
@@ -261,20 +261,22 @@ void reportRangeError(Ctx &ctx, uint8_t *loc, int64_t v, int n,
                       const Symbol &sym, const Twine &msg);
 
 // Make sure that V can be represented as an N bit signed integer.
-inline void checkInt(uint8_t *loc, int64_t v, int n, const Relocation &rel) {
+inline void checkInt(Ctx &ctx, uint8_t *loc, int64_t v, int n,
+                     const Relocation &rel) {
   if (v != llvm::SignExtend64(v, n))
     reportRangeError(ctx, loc, rel, Twine(v), llvm::minIntN(n),
                      llvm::maxIntN(n));
 }
 
 // Make sure that V can be represented as an N bit unsigned integer.
-inline void checkUInt(uint8_t *loc, uint64_t v, int n, const Relocation &rel) {
+inline void checkUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
+                      const Relocation &rel) {
   if ((v >> n) != 0)
     reportRangeError(ctx, loc, rel, Twine(v), 0, llvm::maxUIntN(n));
 }
 
 // Make sure that V can be represented as an N bit signed or unsigned integer.
-inline void checkIntUInt(uint8_t *loc, uint64_t v, int n,
+inline void checkIntUInt(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
                          const Relocation &rel) {
   // For the error message we should cast V to a signed integer so that error
   // messages show a small negative value rather than an extremely large one
@@ -283,7 +285,7 @@ inline void checkIntUInt(uint8_t *loc, uint64_t v, int n,
                      llvm::maxUIntN(n));
 }
 
-inline void checkAlignment(uint8_t *loc, uint64_t v, int n,
+inline void checkAlignment(Ctx &ctx, uint8_t *loc, uint64_t v, int n,
                            const Relocation &rel) {
   if ((v & (n - 1)) != 0)
     error(getErrorLoc(ctx, loc) + "improper alignment for relocation " +


        


More information about the llvm-commits mailing list