[lld] [X86][LLD] Handle R_X86_64_CODE_4_GOTTPOFF relocation type (PR #116634)
Shengchen Kan via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 22 19:46:19 PST 2024
================
@@ -547,44 +548,66 @@ void X86_64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
}
}
-// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
-// R_X86_64_TPOFF32 so that it does not use GOT.
+// In some conditions, R_X86_64_GOTTPOFF/R_X86_64_CODE_4_GOTTPOFF relocation can
+// be optimized to R_X86_64_TPOFF32 so that it does not use GOT.
void X86_64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
uint64_t val) const {
uint8_t *inst = loc - 3;
uint8_t reg = loc[-1] >> 3;
uint8_t *regSlot = loc - 1;
- // Note that ADD with RSP or R12 is converted to ADD instead of LEA
- // because LEA with these registers needs 4 bytes to encode and thus
- // wouldn't fit the space.
-
- if (memcmp(inst, "\x48\x03\x25", 3) == 0) {
- // "addq foo at gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
- memcpy(inst, "\x48\x81\xc4", 3);
- } else if (memcmp(inst, "\x4c\x03\x25", 3) == 0) {
- // "addq foo at gottpoff(%rip),%r12" -> "addq $foo,%r12"
- memcpy(inst, "\x49\x81\xc4", 3);
- } else if (memcmp(inst, "\x4c\x03", 2) == 0) {
- // "addq foo at gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
- memcpy(inst, "\x4d\x8d", 2);
- *regSlot = 0x80 | (reg << 3) | reg;
- } else if (memcmp(inst, "\x48\x03", 2) == 0) {
- // "addq foo at gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
- memcpy(inst, "\x48\x8d", 2);
- *regSlot = 0x80 | (reg << 3) | reg;
- } else if (memcmp(inst, "\x4c\x8b", 2) == 0) {
- // "movq foo at gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
- memcpy(inst, "\x49\xc7", 2);
- *regSlot = 0xc0 | reg;
- } else if (memcmp(inst, "\x48\x8b", 2) == 0) {
- // "movq foo at gottpoff(%rip),%reg" -> "movq $foo,%reg"
- memcpy(inst, "\x48\xc7", 2);
- *regSlot = 0xc0 | reg;
+ if (rel.type == R_X86_64_GOTTPOFF) {
+ // Note that ADD with RSP or R12 is converted to ADD instead of LEA
+ // because LEA with these registers needs 4 bytes to encode and thus
+ // wouldn't fit the space.
+
+ if (memcmp(inst, "\x48\x03\x25", 3) == 0) {
+ // "addq foo at gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
+ memcpy(inst, "\x48\x81\xc4", 3);
+ } else if (memcmp(inst, "\x4c\x03\x25", 3) == 0) {
+ // "addq foo at gottpoff(%rip),%r12" -> "addq $foo,%r12"
+ memcpy(inst, "\x49\x81\xc4", 3);
+ } else if (memcmp(inst, "\x4c\x03", 2) == 0) {
+ // "addq foo at gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
+ memcpy(inst, "\x4d\x8d", 2);
+ *regSlot = 0x80 | (reg << 3) | reg;
+ } else if (memcmp(inst, "\x48\x03", 2) == 0) {
+ // "addq foo at gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
+ memcpy(inst, "\x48\x8d", 2);
+ *regSlot = 0x80 | (reg << 3) | reg;
+ } else if (memcmp(inst, "\x4c\x8b", 2) == 0) {
+ // "movq foo at gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
+ memcpy(inst, "\x49\xc7", 2);
+ *regSlot = 0xc0 | reg;
+ } else if (memcmp(inst, "\x48\x8b", 2) == 0) {
+ // "movq foo at gottpoff(%rip),%reg" -> "movq $foo,%reg"
+ memcpy(inst, "\x48\xc7", 2);
+ *regSlot = 0xc0 | reg;
+ } else {
+ Err(ctx)
+ << getErrorLoc(ctx, loc - 3)
+ << "R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only";
+ }
} else {
----------------
KanRobert wrote:
`relaxTlsIeToLe` is only called for `R_X86_64_CODE_4_GOTTPOFF` and `R_X86_64_GOTTPOFF`. The code would probably be more readable if the first `assert` below were removed and the dispatch were written as:
```
else if (rel.type == R_X86_64_CODE_4_GOTTPOFF) {
} else {
  llvm_unreachable("Unsupported relocation type!");
}
```
And I think the follow-up R_X86_64_CODE_6_GOTTPOFF support can benefit from this too.
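That is, roughly this overall shape (just a structural sketch using the names already in the patch; the memcmp/memcpy rewrites for each encoding are elided):
```
if (rel.type == R_X86_64_GOTTPOFF) {
  // existing rewrites of the REX-prefixed movq/addq forms
} else if (rel.type == R_X86_64_CODE_4_GOTTPOFF) {
  // rewrites of the forms added by this patch;
  // a later R_X86_64_CODE_6_GOTTPOFF arm can be added as another else-if
} else {
  llvm_unreachable("Unsupported relocation type!");
}
```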
The second `assert` should not be there; the diagnostic should always be reported by code like that at line 606:
```
Err(ctx) << getErrorLoc(ctx, loc - 3)
         << "R_X86_64_CODE_4_GOTTPOFF must be used in MOVQ or ADDQ "
            "instructions only";
```
Otherwise the error would be hidden in release builds. BTW, it seems `loc - 4` should be used here, since for R_X86_64_CODE_4_GOTTPOFF the instruction starts 4 bytes before the relocation offset rather than 3.
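For reference, here is a small standalone sketch (not lld code; the helper name `relaxMovIeToLe` is made up, and an x86-64 little-endian host is assumed) of the byte-level IE->LE rewrite for the plain R_X86_64_GOTTPOFF movq case from the hunk above:
```
// Standalone illustration of the in-place rewrite
//   "movq foo@gottpoff(%rip),%reg"  (48 8b /r, RIP-relative load from the GOT)
//   -> "movq $tpoff,%reg"           (48 c7 /0, sign-extended 32-bit immediate)
#include <cstdint>
#include <cstdio>
#include <cstring>

// `loc` points at the 32-bit field covered by the relocation, as in lld.
static void relaxMovIeToLe(uint8_t *loc, int32_t tpoff) {
  uint8_t *inst = loc - 3;        // REX + opcode + ModRM precede the field
  uint8_t reg = loc[-1] >> 3;     // destination register from ModRM.reg
  if (memcmp(inst, "\x48\x8b", 2) == 0) {
    memcpy(inst, "\x48\xc7", 2);  // opcode becomes mov r/m64, imm32
    loc[-1] = 0xc0 | reg;         // ModRM: register-direct, /0, same register
    memcpy(loc, &tpoff, 4);       // GOT-relative offset becomes the TP offset
  }
}

int main() {
  // movq foo@gottpoff(%rip),%rax with a zero placeholder for the rel32.
  uint8_t buf[] = {0x48, 0x8b, 0x05, 0, 0, 0, 0};
  relaxMovIeToLe(buf + 3, -16);   // pretend foo's TP offset is -16
  for (uint8_t b : buf)
    printf("%02x ", b);           // prints: 48 c7 c0 f0 ff ff ff
  printf("\n");
}
```
The r8-r15 and rsp/r12 add cases in the patch follow the same pattern, just with different REX prefixes and opcodes.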
https://github.com/llvm/llvm-project/pull/116634