[lld] 2b5cb1b - [ELF] getRelocTargetVA: pass Ctx and Relocation. NFC
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Sun Oct 6 16:34:15 PDT 2024
Author: Fangrui Song
Date: 2024-10-06T16:34:09-07:00
New Revision: 2b5cb1bf628fc54473355e0675f629d9332089df
URL: https://github.com/llvm/llvm-project/commit/2b5cb1bf628fc54473355e0675f629d9332089df
DIFF: https://github.com/llvm/llvm-project/commit/2b5cb1bf628fc54473355e0675f629d9332089df.diff
LOG: [ELF] getRelocTargetVA: pass Ctx and Relocation. NFC
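For reference, the core interface change (copied from the lld/ELF/InputSection.h and Arch hunks below):

  // Before: every field of the relocation is passed separately.
  static uint64_t getRelocTargetVA(const InputFile *File, RelType Type,
                                   int64_t A, uint64_t P, const Symbol &Sym,
                                   RelExpr Expr);

  // After: the link Ctx, the Relocation record, and the place p suffice.
  uint64_t getRelocTargetVA(Ctx &, const Relocation &r, uint64_t p) const;

  // Typical call site in the relocateAlloc loops:
  const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);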
Added:
Modified:
lld/ELF/Arch/AArch64.cpp
lld/ELF/Arch/PPC.cpp
lld/ELF/Arch/PPC64.cpp
lld/ELF/Arch/RISCV.cpp
lld/ELF/Arch/SystemZ.cpp
lld/ELF/Arch/X86.cpp
lld/ELF/Arch/X86_64.cpp
lld/ELF/InputSection.cpp
lld/ELF/InputSection.h
lld/ELF/SyntheticSections.cpp
lld/ELF/Target.cpp
Removed:
################################################################################
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index a576fe38ea99f0..785ebcb30c760c 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -915,9 +915,7 @@ void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
const Relocation &rel = sec.relocs()[i];
uint8_t *loc = buf + rel.offset;
- const uint64_t val =
- sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
- secAddr + rel.offset, *rel.sym, rel.expr);
+ const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
if (needsGotForMemtag(rel)) {
relocate(loc, rel, val);
diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp
index 22e7f5c996e3d8..a4b19f6c4cdcc0 100644
--- a/lld/ELF/Arch/PPC.cpp
+++ b/lld/ELF/Arch/PPC.cpp
@@ -501,10 +501,8 @@ void PPC::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
secAddr += s->outSecOff;
for (const Relocation &rel : sec.relocs()) {
uint8_t *loc = buf + rel.offset;
- const uint64_t val = SignExtend64(
- sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
- secAddr + rel.offset, *rel.sym, rel.expr),
- 32);
+ const uint64_t val =
+ SignExtend64(sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset), 32);
switch (rel.expr) {
case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
relaxTlsGdToIe(loc, rel, val);
diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp
index f1aa108c163943..3373850c67cc64 100644
--- a/lld/ELF/Arch/PPC64.cpp
+++ b/lld/ELF/Arch/PPC64.cpp
@@ -1569,9 +1569,7 @@ void PPC64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
uint64_t lastPPCRelaxedRelocOff = -1;
for (const Relocation &rel : sec.relocs()) {
uint8_t *loc = buf + rel.offset;
- const uint64_t val =
- sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
- secAddr + rel.offset, *rel.sym, rel.expr);
+ const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
switch (rel.expr) {
case R_PPC64_RELAX_GOT_PC: {
// The R_PPC64_PCREL_OPT relocation must appear immediately after
diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp
index 052d39ed43c4f1..b099a9a5effdc2 100644
--- a/lld/ELF/Arch/RISCV.cpp
+++ b/lld/ELF/Arch/RISCV.cpp
@@ -602,9 +602,7 @@ void RISCV::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
for (size_t i = 0, size = relocs.size(); i != size; ++i) {
const Relocation &rel = relocs[i];
uint8_t *loc = buf + rel.offset;
- uint64_t val =
- sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
- secAddr + rel.offset, *rel.sym, rel.expr);
+ uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
switch (rel.expr) {
case R_RELAX_HINT:
diff --git a/lld/ELF/Arch/SystemZ.cpp b/lld/ELF/Arch/SystemZ.cpp
index fc87103165fd4d..c87ab179651f1e 100644
--- a/lld/ELF/Arch/SystemZ.cpp
+++ b/lld/ELF/Arch/SystemZ.cpp
@@ -447,8 +447,7 @@ bool SystemZ::relaxOnce(int pass) const {
continue;
uint64_t v = sec->getRelocTargetVA(
- sec->file, rel.type, rel.addend,
- sec->getOutputSection()->addr + rel.offset, *rel.sym, rel.expr);
+ ctx, rel, sec->getOutputSection()->addr + rel.offset);
if (isInt<33>(v) && !(v & 1))
continue;
if (rel.sym->auxIdx == 0) {
diff --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp
index 0a16ca24fcb318..0343260840a4a9 100644
--- a/lld/ELF/Arch/X86.cpp
+++ b/lld/ELF/Arch/X86.cpp
@@ -487,10 +487,8 @@ void X86::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
secAddr += s->outSecOff;
for (const Relocation &rel : sec.relocs()) {
uint8_t *loc = buf + rel.offset;
- const uint64_t val = SignExtend64(
- sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
- secAddr + rel.offset, *rel.sym, rel.expr),
- 32);
+ const uint64_t val =
+ SignExtend64(sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset), 32);
switch (rel.expr) {
case R_RELAX_TLS_GD_TO_IE_GOTPLT:
relaxTlsGdToIe(loc, rel, val);
diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp
index d58d0a2961d617..4c70d704496801 100644
--- a/lld/ELF/Arch/X86_64.cpp
+++ b/lld/ELF/Arch/X86_64.cpp
@@ -180,8 +180,7 @@ static bool isFallThruRelocation(InputSection &is, InputFile *file,
return false;
uint64_t addrLoc = is.getOutputSection()->addr + is.outSecOff + r.offset;
- uint64_t targetOffset = InputSectionBase::getRelocTargetVA(
- file, r.type, r.addend, addrLoc, *r.sym, r.expr);
+ uint64_t targetOffset = is.getRelocTargetVA(ctx, r, addrLoc);
// If this jmp is a fall thru, the target offset is the beginning of the
// next section.
@@ -331,10 +330,11 @@ bool X86_64::relaxOnce(int pass) const {
continue;
assert(rel.addend == -4);
- uint64_t v = sec->getRelocTargetVA(
- sec->file, rel.type, rel.expr == R_RELAX_GOT_PC_NOPIC ? 0 : -4,
- sec->getOutputSection()->addr + sec->outSecOff + rel.offset,
- *rel.sym, rel.expr);
+ Relocation rel1 = rel;
+ rel1.addend = rel.expr == R_RELAX_GOT_PC_NOPIC ? 0 : -4;
+ uint64_t v = sec->getRelocTargetVA(ctx, rel1,
+ sec->getOutputSection()->addr +
+ sec->outSecOff + rel.offset);
if (isInt<32>(v))
continue;
if (rel.sym->auxIdx == 0) {
@@ -1059,9 +1059,7 @@ void X86_64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
if (rel.expr == R_NONE) // See deleteFallThruJmpInsn
continue;
uint8_t *loc = buf + rel.offset;
- const uint64_t val =
- sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
- secAddr + rel.offset, *rel.sym, rel.expr);
+ const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
relocate(loc, rel, val);
}
if (sec.jumpInstrMod) {
diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index 09267a515d0900..4328c085426a7c 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -717,10 +717,10 @@ static int64_t getTlsTpOffset(const Symbol &s) {
}
}
-uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
- int64_t a, uint64_t p,
- const Symbol &sym, RelExpr expr) {
- switch (expr) {
+uint64_t InputSectionBase::getRelocTargetVA(Ctx &ctx, const Relocation &r,
+ uint64_t p) const {
+ int64_t a = r.addend;
+ switch (r.expr) {
case R_ABS:
case R_DTPREL:
case R_RELAX_TLS_LD_TO_LE_ABS:
@@ -728,63 +728,63 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
case R_AARCH64_AUTH:
case R_RISCV_ADD:
case R_RISCV_LEB128:
- return sym.getVA(a);
+ return r.sym->getVA(a);
case R_ADDEND:
return a;
case R_RELAX_HINT:
return 0;
case R_ARM_SBREL:
- return sym.getVA(a) - getARMStaticBase(sym);
+ return r.sym->getVA(a) - getARMStaticBase(*r.sym);
case R_GOT:
case R_RELAX_TLS_GD_TO_IE_ABS:
- return sym.getGotVA() + a;
+ return r.sym->getGotVA() + a;
case R_LOONGARCH_GOT:
- // The LoongArch TLS GD relocs reuse the R_LARCH_GOT_PC_LO12 reloc type
+ // The LoongArch TLS GD relocs reuse the R_LARCH_GOT_PC_LO12 reloc r.type
    // for their page offsets. The arithmetics are different in the TLS case
    // so we have to duplicate some logic here.
- if (sym.hasFlag(NEEDS_TLSGD) && type != R_LARCH_TLS_IE_PC_LO12)
+ if (r.sym->hasFlag(NEEDS_TLSGD) && r.type != R_LARCH_TLS_IE_PC_LO12)
// Like R_LOONGARCH_TLSGD_PAGE_PC but taking the absolute value.
- return ctx.in.got->getGlobalDynAddr(sym) + a;
- return getRelocTargetVA(file, type, a, p, sym, R_GOT);
+ return ctx.in.got->getGlobalDynAddr(*r.sym) + a;
+ return r.sym->getGotVA() + a;
case R_GOTONLY_PC:
return ctx.in.got->getVA() + a - p;
case R_GOTPLTONLY_PC:
return ctx.in.gotPlt->getVA() + a - p;
case R_GOTREL:
case R_PPC64_RELAX_TOC:
- return sym.getVA(a) - ctx.in.got->getVA();
+ return r.sym->getVA(a) - ctx.in.got->getVA();
case R_GOTPLTREL:
- return sym.getVA(a) - ctx.in.gotPlt->getVA();
+ return r.sym->getVA(a) - ctx.in.gotPlt->getVA();
case R_GOTPLT:
case R_RELAX_TLS_GD_TO_IE_GOTPLT:
- return sym.getGotVA() + a - ctx.in.gotPlt->getVA();
+ return r.sym->getGotVA() + a - ctx.in.gotPlt->getVA();
case R_TLSLD_GOT_OFF:
case R_GOT_OFF:
case R_RELAX_TLS_GD_TO_IE_GOT_OFF:
- return sym.getGotOffset() + a;
+ return r.sym->getGotOffset() + a;
case R_AARCH64_GOT_PAGE_PC:
case R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC:
- return getAArch64Page(sym.getGotVA() + a) - getAArch64Page(p);
+ return getAArch64Page(r.sym->getGotVA() + a) - getAArch64Page(p);
case R_AARCH64_GOT_PAGE:
- return sym.getGotVA() + a - getAArch64Page(ctx.in.got->getVA());
+ return r.sym->getGotVA() + a - getAArch64Page(ctx.in.got->getVA());
case R_GOT_PC:
case R_RELAX_TLS_GD_TO_IE:
- return sym.getGotVA() + a - p;
+ return r.sym->getGotVA() + a - p;
case R_GOTPLT_GOTREL:
- return sym.getGotPltVA() + a - ctx.in.got->getVA();
+ return r.sym->getGotPltVA() + a - ctx.in.got->getVA();
case R_GOTPLT_PC:
- return sym.getGotPltVA() + a - p;
+ return r.sym->getGotPltVA() + a - p;
case R_LOONGARCH_GOT_PAGE_PC:
- if (sym.hasFlag(NEEDS_TLSGD))
- return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(sym) + a, p,
- type);
- return getLoongArchPageDelta(sym.getGotVA() + a, p, type);
+ if (r.sym->hasFlag(NEEDS_TLSGD))
+ return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(*r.sym) + a, p,
+ r.type);
+ return getLoongArchPageDelta(r.sym->getGotVA() + a, p, r.type);
case R_MIPS_GOTREL:
- return sym.getVA(a) - ctx.in.mipsGot->getGp(file);
+ return r.sym->getVA(a) - ctx.in.mipsGot->getGp(file);
case R_MIPS_GOT_GP:
return ctx.in.mipsGot->getGp(file) + a;
case R_MIPS_GOT_GP_PC: {
- // R_MIPS_LO16 expression has R_MIPS_GOT_GP_PC type iif the target
+ // R_MIPS_LO16 expression has R_MIPS_GOT_GP_PC r.type iif the target
// is _gp_disp symbol. In that case we should use the following
// formula for calculation "AHL + GP - P + 4". For details see p. 4-19 at
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
@@ -792,9 +792,9 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
// expressions: AHL + GP - P + 3 for %lo() and AHL + GP - P - 1 for %hi()
// to correctly handle less-significant bit of the microMIPS symbol.
uint64_t v = ctx.in.mipsGot->getGp(file) + a - p;
- if (type == R_MIPS_LO16 || type == R_MICROMIPS_LO16)
+ if (r.type == R_MIPS_LO16 || r.type == R_MICROMIPS_LO16)
v += 4;
- if (type == R_MICROMIPS_LO16 || type == R_MICROMIPS_HI16)
+ if (r.type == R_MICROMIPS_LO16 || r.type == R_MICROMIPS_HI16)
v -= 1;
return v;
}
@@ -803,42 +803,41 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
// should be initialized by 'page address'. This address is high 16-bits
// of sum the symbol's value and the addend.
return ctx.in.mipsGot->getVA() +
- ctx.in.mipsGot->getPageEntryOffset(file, sym, a) -
+ ctx.in.mipsGot->getPageEntryOffset(file, *r.sym, a) -
ctx.in.mipsGot->getGp(file);
case R_MIPS_GOT_OFF:
case R_MIPS_GOT_OFF32:
// In case of MIPS if a GOT relocation has non-zero addend this addend
// should be applied to the GOT entry content not to the GOT entry offset.
- // That is why we use separate expression type.
+ // That is why we use separate expression r.type.
return ctx.in.mipsGot->getVA() +
- ctx.in.mipsGot->getSymEntryOffset(file, sym, a) -
+ ctx.in.mipsGot->getSymEntryOffset(file, *r.sym, a) -
ctx.in.mipsGot->getGp(file);
case R_MIPS_TLSGD:
return ctx.in.mipsGot->getVA() +
- ctx.in.mipsGot->getGlobalDynOffset(file, sym) -
+ ctx.in.mipsGot->getGlobalDynOffset(file, *r.sym) -
ctx.in.mipsGot->getGp(file);
case R_MIPS_TLSLD:
return ctx.in.mipsGot->getVA() + ctx.in.mipsGot->getTlsIndexOffset(file) -
ctx.in.mipsGot->getGp(file);
case R_AARCH64_PAGE_PC: {
- uint64_t val = sym.isUndefWeak() ? p + a : sym.getVA(a);
+ uint64_t val = r.sym->isUndefWeak() ? p + a : r.sym->getVA(a);
return getAArch64Page(val) - getAArch64Page(p);
}
case R_RISCV_PC_INDIRECT: {
- if (const Relocation *hiRel = getRISCVPCRelHi20(&sym, a))
- return getRelocTargetVA(file, hiRel->type, hiRel->addend, sym.getVA(),
- *hiRel->sym, hiRel->expr);
+ if (const Relocation *hiRel = getRISCVPCRelHi20(r.sym, a))
+ return getRelocTargetVA(ctx, *hiRel, r.sym->getVA());
return 0;
}
case R_LOONGARCH_PAGE_PC:
- return getLoongArchPageDelta(sym.getVA(a), p, type);
+ return getLoongArchPageDelta(r.sym->getVA(a), p, r.type);
case R_PC:
case R_ARM_PCA: {
uint64_t dest;
- if (expr == R_ARM_PCA)
+ if (r.expr == R_ARM_PCA)
// Some PC relative ARM (Thumb) relocations align down the place.
p = p & 0xfffffffc;
- if (sym.isUndefined()) {
+ if (r.sym->isUndefined()) {
// On ARM and AArch64 a branch to an undefined weak resolves to the next
// instruction, otherwise the place. On RISC-V, resolve an undefined weak
// to the same instruction to cause an infinite loop (making the user
@@ -846,38 +845,38 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
// Note: if the symbol is hidden, its binding has been converted to local,
// so we just check isUndefined() here.
if (ctx.arg.emachine == EM_ARM)
- dest = getARMUndefinedRelativeWeakVA(type, a, p);
+ dest = getARMUndefinedRelativeWeakVA(r.type, a, p);
else if (ctx.arg.emachine == EM_AARCH64)
- dest = getAArch64UndefinedRelativeWeakVA(type, p) + a;
+ dest = getAArch64UndefinedRelativeWeakVA(r.type, p) + a;
else if (ctx.arg.emachine == EM_PPC)
dest = p;
else if (ctx.arg.emachine == EM_RISCV)
- dest = getRISCVUndefinedRelativeWeakVA(type, p) + a;
+ dest = getRISCVUndefinedRelativeWeakVA(r.type, p) + a;
else
- dest = sym.getVA(a);
+ dest = r.sym->getVA(a);
} else {
- dest = sym.getVA(a);
+ dest = r.sym->getVA(a);
}
return dest - p;
}
case R_PLT:
- return sym.getPltVA() + a;
+ return r.sym->getPltVA() + a;
case R_PLT_PC:
case R_PPC64_CALL_PLT:
- return sym.getPltVA() + a - p;
+ return r.sym->getPltVA() + a - p;
case R_LOONGARCH_PLT_PAGE_PC:
- return getLoongArchPageDelta(sym.getPltVA() + a, p, type);
+ return getLoongArchPageDelta(r.sym->getPltVA() + a, p, r.type);
case R_PLT_GOTPLT:
- return sym.getPltVA() + a - ctx.in.gotPlt->getVA();
+ return r.sym->getPltVA() + a - ctx.in.gotPlt->getVA();
case R_PLT_GOTREL:
- return sym.getPltVA() + a - ctx.in.got->getVA();
+ return r.sym->getPltVA() + a - ctx.in.got->getVA();
case R_PPC32_PLTREL:
// R_PPC_PLTREL24 uses the addend (usually 0 or 0x8000) to indicate r30
// stores _GLOBAL_OFFSET_TABLE_ or .got2+0x8000. The addend is ignored for
// target VA computation.
- return sym.getPltVA() - p;
+ return r.sym->getPltVA() - p;
case R_PPC64_CALL: {
- uint64_t symVA = sym.getVA(a);
+ uint64_t symVA = r.sym->getVA(a);
// If we have an undefined weak symbol, we might get here with a symbol
// address of zero. That could overflow, but the code must be unreachable,
// so don't bother doing anything at all.
@@ -890,13 +889,13 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
// the callee. For local calls the caller and callee share the same
// TOC base and so the TOC pointer initialization code should be skipped by
// branching to the local entry point.
- return symVA - p + getPPC64GlobalEntryToLocalEntryOffset(sym.stOther);
+ return symVA - p + getPPC64GlobalEntryToLocalEntryOffset(r.sym->stOther);
}
case R_PPC64_TOCBASE:
return getPPC64TocBase(ctx) + a;
case R_RELAX_GOT_PC:
case R_PPC64_RELAX_GOT_PC:
- return sym.getVA(a) - p;
+ return r.sym->getVA(a) - p;
case R_RELAX_TLS_GD_TO_LE:
case R_RELAX_TLS_IE_TO_LE:
case R_RELAX_TLS_LD_TO_LE:
@@ -905,36 +904,37 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
// --noinhibit-exec, even a non-weak undefined reference may reach here.
// Just return A, which matches R_ABS, and the behavior of some dynamic
// loaders.
- if (sym.isUndefined())
+ if (r.sym->isUndefined())
return a;
- return getTlsTpOffset(sym) + a;
+ return getTlsTpOffset(*r.sym) + a;
case R_RELAX_TLS_GD_TO_LE_NEG:
case R_TPREL_NEG:
- if (sym.isUndefined())
+ if (r.sym->isUndefined())
return a;
- return -getTlsTpOffset(sym) + a;
+ return -getTlsTpOffset(*r.sym) + a;
case R_SIZE:
- return sym.getSize() + a;
+ return r.sym->getSize() + a;
case R_TLSDESC:
- return ctx.in.got->getTlsDescAddr(sym) + a;
+ return ctx.in.got->getTlsDescAddr(*r.sym) + a;
case R_TLSDESC_PC:
- return ctx.in.got->getTlsDescAddr(sym) + a - p;
+ return ctx.in.got->getTlsDescAddr(*r.sym) + a - p;
case R_TLSDESC_GOTPLT:
- return ctx.in.got->getTlsDescAddr(sym) + a - ctx.in.gotPlt->getVA();
+ return ctx.in.got->getTlsDescAddr(*r.sym) + a - ctx.in.gotPlt->getVA();
case R_AARCH64_TLSDESC_PAGE:
- return getAArch64Page(ctx.in.got->getTlsDescAddr(sym) + a) -
+ return getAArch64Page(ctx.in.got->getTlsDescAddr(*r.sym) + a) -
getAArch64Page(p);
case R_LOONGARCH_TLSDESC_PAGE_PC:
- return getLoongArchPageDelta(ctx.in.got->getTlsDescAddr(sym) + a, p, type);
+ return getLoongArchPageDelta(ctx.in.got->getTlsDescAddr(*r.sym) + a, p,
+ r.type);
case R_TLSGD_GOT:
- return ctx.in.got->getGlobalDynOffset(sym) + a;
+ return ctx.in.got->getGlobalDynOffset(*r.sym) + a;
case R_TLSGD_GOTPLT:
- return ctx.in.got->getGlobalDynAddr(sym) + a - ctx.in.gotPlt->getVA();
+ return ctx.in.got->getGlobalDynAddr(*r.sym) + a - ctx.in.gotPlt->getVA();
case R_TLSGD_PC:
- return ctx.in.got->getGlobalDynAddr(sym) + a - p;
+ return ctx.in.got->getGlobalDynAddr(*r.sym) + a - p;
case R_LOONGARCH_TLSGD_PAGE_PC:
- return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(sym) + a, p,
- type);
+ return getLoongArchPageDelta(ctx.in.got->getGlobalDynAddr(*r.sym) + a, p,
+ r.type);
case R_TLSLD_GOTPLT:
return ctx.in.got->getVA() + ctx.in.got->getTlsIndexOff() + a -
ctx.in.gotPlt->getVA();
diff --git a/lld/ELF/InputSection.h b/lld/ELF/InputSection.h
index c368ed4dc2e9c2..51449acd5cad06 100644
--- a/lld/ELF/InputSection.h
+++ b/lld/ELF/InputSection.h
@@ -244,9 +244,7 @@ class InputSectionBase : public SectionBase {
// relocations, assuming that Buf points to this section's copy in
// the mmap'ed output buffer.
template <class ELFT> void relocate(uint8_t *buf, uint8_t *bufEnd);
- static uint64_t getRelocTargetVA(const InputFile *File, RelType Type,
- int64_t A, uint64_t P, const Symbol &Sym,
- RelExpr Expr);
+ uint64_t getRelocTargetVA(Ctx &, const Relocation &r, uint64_t p) const;
// The native ELF reloc data type is not very convenient to handle.
// So we convert ELF reloc records to our own records in Relocations.cpp.
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index ae7b414cef59b8..600a3c3389c4d8 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -1608,8 +1608,8 @@ int64_t DynamicReloc::computeAddend() const {
return addend;
case AddendOnlyWithTargetVA:
case AgainstSymbolWithTargetVA: {
- uint64_t ca = InputSection::getRelocTargetVA(inputSec->file, type, addend,
- getOffset(), *sym, expr);
+ uint64_t ca = inputSec->getRelocTargetVA(
+ ctx, Relocation{expr, type, 0, addend, sym}, getOffset());
return ctx.arg.is64 ? ca : SignExtend64<32>(ca);
}
case MipsMultiGotPage:
diff --git a/lld/ELF/Target.cpp b/lld/ELF/Target.cpp
index d5d11b9549e03f..d1706fb1f2bbd7 100644
--- a/lld/ELF/Target.cpp
+++ b/lld/ELF/Target.cpp
@@ -154,9 +154,7 @@ void TargetInfo::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
for (const Relocation &rel : sec.relocs()) {
uint8_t *loc = buf + rel.offset;
const uint64_t val = SignExtend64(
- sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
- secAddr + rel.offset, *rel.sym, rel.expr),
- bits);
+ sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset), bits);
if (rel.expr != R_RELAX_HINT)
relocate(loc, rel, val);
}