[llvm-branch-commits] [llvm] adc55b5 - [X86] Avoid generating invalid R_X86_64_GOTPCRELX relocations
Harald van Dijk via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Fri Dec 18 15:44:06 PST 2020
Author: Harald van Dijk
Date: 2020-12-18T23:38:38Z
New Revision: adc55b5a5ae49f1fe3a04f7f79b1c08f508b4307
URL: https://github.com/llvm/llvm-project/commit/adc55b5a5ae49f1fe3a04f7f79b1c08f508b4307
DIFF: https://github.com/llvm/llvm-project/commit/adc55b5a5ae49f1fe3a04f7f79b1c08f508b4307.diff
LOG: [X86] Avoid generating invalid R_X86_64_GOTPCRELX relocations
We need to make sure not to emit R_X86_64_GOTPCRELX relocations for
instructions that use a REX prefix. If a REX prefix is present, we need to
use an R_X86_64_REX_GOTPCRELX relocation instead. The existing logic for
CALL64m, JMP64m, etc. already handles this by checking the HasREX parameter
and using it to determine which relocation type to use. Do the same for all
instructions that can use relaxed relocations.
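For example (an illustrative sketch, not part of the committed tests; `sym'
is a placeholder symbol), the same operation needs a different relocation
depending on whether its encoding carries a REX prefix:

  addl sym@GOTPCREL(%rip), %eax   # no REX prefix -> R_X86_64_GOTPCRELX
  addl sym@GOTPCREL(%rip), %r8d   # REX.B prefix  -> R_X86_64_REX_GOTPCRELX
  addq sym@GOTPCREL(%rip), %rax   # REX.W prefix  -> R_X86_64_REX_GOTPCRELX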
Reviewed By: MaskRay
Differential Revision: https://reviews.llvm.org/D93561
Added:
Modified:
lld/test/ELF/x86-64-gotpc-relax-nopic.s
llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
llvm/test/MC/X86/gotpcrelx.s
Removed:
llvm/test/MC/ELF/got-relaxed-rex.s
################################################################################
diff --git a/lld/test/ELF/x86-64-gotpc-relax-nopic.s b/lld/test/ELF/x86-64-gotpc-relax-nopic.s
index 501414f7bdde..81d25f9ecafb 100644
--- a/lld/test/ELF/x86-64-gotpc-relax-nopic.s
+++ b/lld/test/ELF/x86-64-gotpc-relax-nopic.s
@@ -23,8 +23,8 @@
# DISASM-NEXT: orl {{.*}}(%rip), %edi # 202240
# DISASM-NEXT: sbbl {{.*}}(%rip), %esi # 202240
# DISASM-NEXT: subl {{.*}}(%rip), %ebp # 202240
-# DISASM-NEXT: xorl {{.*}}(%rip), %r8d # 202240
-# DISASM-NEXT: testl %r15d, {{.*}}(%rip) # 202240
+# DISASM-NEXT: xorl $0x203248, %r8d
+# DISASM-NEXT: testl $0x203248, %r15d
# DISASM-NEXT: 201200: adcq $0x203248, %rax
# DISASM-NEXT: addq $0x203248, %rbx
# DISASM-NEXT: andq $0x203248, %rcx
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index 59860cad01f7..260253a5302d 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -409,6 +409,12 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
switch (Opcode) {
default:
return X86::reloc_riprel_4byte;
+ case X86::MOV64rm:
+ // movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
+ // special case because COFF and Mach-O don't support ELF's more
+ // flexible R_X86_64_REX_GOTPCRELX relaxation.
+ assert(HasREX);
+ return X86::reloc_riprel_4byte_movq_load;
case X86::ADC32rm:
case X86::ADD32rm:
case X86::AND32rm:
@@ -419,13 +425,6 @@ void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
case X86::SUB32rm:
case X86::TEST32mr:
case X86::XOR32rm:
- return X86::reloc_riprel_4byte_relax;
- case X86::MOV64rm:
- // movq loads is a subset of reloc_riprel_4byte_relax_rex. It is a
- // special case because COFF and Mach-O don't support ELF's more
- // flexible R_X86_64_REX_GOTPCRELX relaxation.
- assert(HasREX);
- return X86::reloc_riprel_4byte_movq_load;
case X86::CALL64m:
case X86::JMP64m:
case X86::TAILJMPm64:
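With this change the 32-bit memory forms above fall through to the
CALL64m/JMP64m handling, which already picks between the relaxed fixup kinds
based on HasREX, while MOV64rm keeps its dedicated
reloc_riprel_4byte_movq_load kind because COFF and Mach-O can only relax the
plain movq load. A quick way to observe the effect on ELF (an illustrative
check, not part of the patch; `sym' and the file name are placeholders):

  # llvm-mc -filetype=obj -triple x86_64-pc-linux reloc.s -o - | llvm-readobj -r -
  xorl sym@GOTPCREL(%rip), %edi   # R_X86_64_GOTPCRELX
  xorl sym@GOTPCREL(%rip), %r8d   # R_X86_64_REX_GOTPCRELX (previously the invalid R_X86_64_GOTPCRELX)
  movq sym@GOTPCREL(%rip), %rax   # R_X86_64_REX_GOTPCRELX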
diff --git a/llvm/test/MC/ELF/got-relaxed-rex.s b/llvm/test/MC/ELF/got-relaxed-rex.s
deleted file mode 100644
index 1924bddc473e..000000000000
--- a/llvm/test/MC/ELF/got-relaxed-rex.s
+++ /dev/null
@@ -1,36 +0,0 @@
-// RUN: llvm-mc -filetype=obj -triple x86_64-pc-linux %s -o - | llvm-readobj -r - | FileCheck %s
-
-// these should produce R_X86_64_REX_GOTPCRELX
-
- movq mov@GOTPCREL(%rip), %rax
- test %rax, test@GOTPCREL(%rip)
- adc adc@GOTPCREL(%rip), %rax
- add add@GOTPCREL(%rip), %rax
- and and@GOTPCREL(%rip), %rax
- cmp cmp@GOTPCREL(%rip), %rax
- or or@GOTPCREL(%rip), %rax
- sbb sbb@GOTPCREL(%rip), %rax
- sub sub@GOTPCREL(%rip), %rax
- xor xor@GOTPCREL(%rip), %rax
-
-.section .norelax,"ax"
-## This expression loads the GOT entry with an offset.
-## Don't emit R_X86_64_REX_GOTPCRELX.
- movq mov@GOTPCREL+1(%rip), %rax
-
-// CHECK: Relocations [
-// CHECK-NEXT: Section ({{.*}}) .rela.text {
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX mov
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX test
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX adc
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX add
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX and
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX cmp
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX or
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX sbb
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX sub
-// CHECK-NEXT: R_X86_64_REX_GOTPCRELX xor
-// CHECK-NEXT: }
-// CHECK-NEXT: Section ({{.*}}) .rela.norelax {
-// CHECK-NEXT: R_X86_64_GOTPCREL mov
-// CHECK-NEXT: }
diff --git a/llvm/test/MC/X86/gotpcrelx.s b/llvm/test/MC/X86/gotpcrelx.s
index 3889835a1683..91f20c6c567a 100644
--- a/llvm/test/MC/X86/gotpcrelx.s
+++ b/llvm/test/MC/X86/gotpcrelx.s
@@ -17,6 +17,26 @@
# CHECK-NEXT: R_X86_64_GOTPCRELX xor
# CHECK-NEXT: R_X86_64_GOTPCRELX call
# CHECK-NEXT: R_X86_64_GOTPCRELX jmp
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX mov
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX test
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX adc
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX add
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX and
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX cmp
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX or
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sbb
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sub
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX xor
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX mov
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX test
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX adc
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX add
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX and
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX cmp
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX or
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sbb
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX sub
+# CHECK-NEXT: R_X86_64_REX_GOTPCRELX xor
# CHECK-NEXT: }
# NORELAX-NEXT: R_X86_64_GOTPCREL mov
@@ -31,6 +51,26 @@
# NORELAX-NEXT: R_X86_64_GOTPCREL xor
# NORELAX-NEXT: R_X86_64_GOTPCREL call
# NORELAX-NEXT: R_X86_64_GOTPCREL jmp
+# NORELAX-NEXT: R_X86_64_GOTPCREL mov
+# NORELAX-NEXT: R_X86_64_GOTPCREL test
+# NORELAX-NEXT: R_X86_64_GOTPCREL adc
+# NORELAX-NEXT: R_X86_64_GOTPCREL add
+# NORELAX-NEXT: R_X86_64_GOTPCREL and
+# NORELAX-NEXT: R_X86_64_GOTPCREL cmp
+# NORELAX-NEXT: R_X86_64_GOTPCREL or
+# NORELAX-NEXT: R_X86_64_GOTPCREL sbb
+# NORELAX-NEXT: R_X86_64_GOTPCREL sub
+# NORELAX-NEXT: R_X86_64_GOTPCREL xor
+# NORELAX-NEXT: R_X86_64_GOTPCREL mov
+# NORELAX-NEXT: R_X86_64_GOTPCREL test
+# NORELAX-NEXT: R_X86_64_GOTPCREL adc
+# NORELAX-NEXT: R_X86_64_GOTPCREL add
+# NORELAX-NEXT: R_X86_64_GOTPCREL and
+# NORELAX-NEXT: R_X86_64_GOTPCREL cmp
+# NORELAX-NEXT: R_X86_64_GOTPCREL or
+# NORELAX-NEXT: R_X86_64_GOTPCREL sbb
+# NORELAX-NEXT: R_X86_64_GOTPCREL sub
+# NORELAX-NEXT: R_X86_64_GOTPCREL xor
# NORELAX-NEXT: }
movl mov@GOTPCREL(%rip), %eax
@@ -46,8 +86,31 @@ xor xor@GOTPCREL(%rip), %eax
call *call@GOTPCREL(%rip)
jmp *jmp@GOTPCREL(%rip)
+movl mov@GOTPCREL(%rip), %r8d
+test %r8d, test@GOTPCREL(%rip)
+adc adc@GOTPCREL(%rip), %r8d
+add add@GOTPCREL(%rip), %r8d
+and and@GOTPCREL(%rip), %r8d
+cmp cmp@GOTPCREL(%rip), %r8d
+or or@GOTPCREL(%rip), %r8d
+sbb sbb@GOTPCREL(%rip), %r8d
+sub sub@GOTPCREL(%rip), %r8d
+xor xor@GOTPCREL(%rip), %r8d
+
+movq mov@GOTPCREL(%rip), %rax
+test %rax, test@GOTPCREL(%rip)
+adc adc@GOTPCREL(%rip), %rax
+add add@GOTPCREL(%rip), %rax
+and and@GOTPCREL(%rip), %rax
+cmp cmp@GOTPCREL(%rip), %rax
+or or@GOTPCREL(%rip), %rax
+sbb sbb@GOTPCREL(%rip), %rax
+sub sub@GOTPCREL(%rip), %rax
+xor xor@GOTPCREL(%rip), %rax
+
# COMMON-NEXT: Section ({{.*}}) .rela.norelax {
# COMMON-NEXT: R_X86_64_GOTPCREL mov 0x0
+# COMMON-NEXT: R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFD
# COMMON-NEXT: R_X86_64_GOTPCREL mov 0xFFFFFFFFFFFFFFFC
# COMMON-NEXT: }
# COMMON-NEXT: ]
@@ -56,5 +119,7 @@ jmp *jmp@GOTPCREL(%rip)
## Clang may emit this expression to load the high 32-bit of the GOT entry.
## Don't emit R_X86_64_GOTPCRELX.
movl mov@GOTPCREL+4(%rip), %eax
+## Don't emit R_X86_64_GOTPCRELX.
+movq mov@GOTPCREL+1(%rip), %rax
## We could emit R_X86_64_GOTPCRELX, but it is probably unnecessary.
movl mov@GOTPCREL+0(%rip), %eax