[llvm] b397795 - [JITLink][ELF] Implement R_X86_64_PLT32 relocations

Stefan Gränitz via llvm-commits llvm-commits@lists.llvm.org
Tue Nov 3 04:08:44 PST 2020


Author: Stefan Gränitz
Date: 2020-11-03T12:05:54Z
New Revision: b397795f1a2f9e25669fe030dc3836f35e2e725e

URL: https://github.com/llvm/llvm-project/commit/b397795f1a2f9e25669fe030dc3836f35e2e725e
DIFF: https://github.com/llvm/llvm-project/commit/b397795f1a2f9e25669fe030dc3836f35e2e725e.diff

LOG: [JITLink][ELF] Implement R_X86_64_PLT32 relocations

Basic implementation for call and jmp branches with a 32-bit offset. Branches to local targets produce
Branch32 edges that are resolved like regular PCRel32 relocations. Branches to external (undefined)
targets produce Branch32ToStub edges and go through a PLT entry by default. If the target happens to
get resolved within 32-bit range of the call site, the edge is relaxed during post-allocation
optimization. There is a test for each of these cases.

Reviewed By: lhames

Differential Revision: https://reviews.llvm.org/D90331
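
For reference, the relaxation described above boils down to a signed 32-bit range
check on the branch displacement. Below is a minimal standalone sketch; the helper
name canBypassStub is illustrative (not JITLink API), and only the displacement
computation and the int32 range test mirror the optimize pass in the patch:

    #include <cstdint>
    #include <limits>

    // Sketch: can a 32-bit PC-relative branch whose fixup lives at EdgeAddr
    // reach TargetAddr directly? If so, the Branch32ToStub edge can be
    // retargeted at the resolved symbol and the PLT stub bypassed.
    static bool canBypassStub(uint64_t EdgeAddr, uint64_t TargetAddr) {
      // Same displacement computation as in optimizeELF_x86_64_GOTAndStubs.
      int64_t Displacement = static_cast<int64_t>(TargetAddr - EdgeAddr + 4);
      return Displacement >= std::numeric_limits<int32_t>::min() &&
             Displacement <= std::numeric_limits<int32_t>::max();
    }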

Added: 
    

Modified: 
    llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
    llvm/test/ExecutionEngine/JITLink/X86/ELF_x86-64_relocations.s

Removed: 
    


################################################################################
diff --git a/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp b/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
index fe8eba03edf4..49933fc14d52 100644
--- a/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
@@ -73,7 +73,6 @@ class ELF_x86_64_GOTAndStubsBuilder
 
   void fixExternalBranchEdge(Edge &E, Symbol &Stub) {
     assert(E.getKind() == Branch32 && "Not a Branch32 edge?");
-    assert(E.getAddend() == 0 && "Branch32 edge has non-zero addend?");
 
     // Set the edge kind to Branch32ToStub. We will use this to check for stub
     // optimization opportunities in the optimize ELF_x86_64_GOTAndStubs pass
@@ -160,11 +159,6 @@ static Error optimizeELF_x86_64_GOTAndStubs(LinkGraph &G) {
           });
         }
       } else if (E.getKind() == Branch32ToStub) {
-
-        // Switch the edge kind to PCRel32: Whether we change the edge target
-        // or not this will be the desired kind.
-        E.setKind(Branch32);
-
         auto &StubBlock = E.getTarget().getBlock();
         assert(StubBlock.getSize() ==
                    sizeof(ELF_x86_64_GOTAndStubsBuilder::StubContent) &&
@@ -185,6 +179,7 @@ static Error optimizeELF_x86_64_GOTAndStubs(LinkGraph &G) {
         int64_t Displacement = TargetAddr - EdgeAddr + 4;
         if (Displacement >= std::numeric_limits<int32_t>::min() &&
             Displacement <= std::numeric_limits<int32_t>::max()) {
+          E.setKind(Branch32);
           E.setTarget(GOTTarget);
           LLVM_DEBUG({
             dbgs() << "  Replaced stub branch with direct branch:\n    ";
@@ -232,6 +227,8 @@ class ELFLinkGraphBuilder_x86_64 {
     case ELF::R_X86_64_GOTPCRELX:
     case ELF::R_X86_64_REX_GOTPCRELX:
       return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel32GOTLoad;
+    case ELF::R_X86_64_PLT32:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::Branch32;
     }
     return make_error<JITLinkError>("Unsupported x86-64 relocation:" +
                                     formatv("{0:d}", Type));
@@ -639,6 +636,8 @@ class ELFJITLinker_x86_64 : public JITLinker<ELFJITLinker_x86_64> {
     char *FixupPtr = BlockWorkingMem + E.getOffset();
     JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
     switch (E.getKind()) {
+    case ELFX86RelocationKind::Branch32:
+    case ELFX86RelocationKind::Branch32ToStub:
     case ELFX86RelocationKind::PCRel32:
     case ELFX86RelocationKind::PCRel32GOTLoad: {
       int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
@@ -687,6 +686,10 @@ StringRef getELFX86RelocationKindName(Edge::Kind R) {
     return "Pointer64";
   case PCRel32GOTLoad:
     return "PCRel32GOTLoad";
+  case Branch32:
+    return "Branch32";
+  case Branch32ToStub:
+    return "Branch32ToStub";
   }
   return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
 }
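
The Branch32 and Branch32ToStub edges added above are applied with the existing
PCRel32 fixup shown in the ELFJITLinker_x86_64 hunk: the value written is
Target + Addend - FixupAddress, truncated to a signed 32-bit immediate. A minimal
standalone sketch of that arithmetic follows (applyPCRel32 is a hypothetical
helper, not JITLink API; the explicit range check and the little-endian-host
assumption are additions for illustration):

    #include <cstdint>
    #include <cstring>
    #include <limits>

    // Sketch: compute the PC-relative value for a Branch32/PCRel32 edge and
    // store it as a 32-bit immediate at FixupPtr. Returns false if the value
    // does not fit in a signed 32-bit displacement.
    static bool applyPCRel32(char *FixupPtr, uint64_t FixupAddress,
                             uint64_t TargetAddress, int64_t Addend) {
      int64_t Value = static_cast<int64_t>(TargetAddress) + Addend -
                      static_cast<int64_t>(FixupAddress);
      if (Value < std::numeric_limits<int32_t>::min() ||
          Value > std::numeric_limits<int32_t>::max())
        return false;
      int32_t Imm = static_cast<int32_t>(Value);
      std::memcpy(FixupPtr, &Imm, sizeof(Imm)); // x86-64 targets are little-endian
      return true;
    }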

diff --git a/llvm/test/ExecutionEngine/JITLink/X86/ELF_x86-64_relocations.s b/llvm/test/ExecutionEngine/JITLink/X86/ELF_x86-64_relocations.s
index 2814dd2b4c8f..4ea2e9160561 100644
--- a/llvm/test/ExecutionEngine/JITLink/X86/ELF_x86-64_relocations.s
+++ b/llvm/test/ExecutionEngine/JITLink/X86/ELF_x86-64_relocations.s
@@ -1,7 +1,10 @@
 # RUN: rm -rf %t && mkdir -p %t
 # RUN: llvm-mc -triple=x86_64-unknown-linux -position-independent -filetype=obj -o %t/elf_reloc.o %s
 # RUN: llvm-jitlink -noexec -slab-allocate 100Kb -slab-address 0xfff00000 \
-# RUN:   -define-abs external_data=0x1 -check %s %t/elf_reloc.o
+# RUN:              -define-abs external_data=0x1 \
+# RUN:              -define-abs extern_in_range32=0xffe00000 \
+# RUN:              -define-abs extern_out_of_range32=0x7fff00000000 \
+# RUN:              -check %s %t/elf_reloc.o
 #
 # Test standard ELF relocations.
 
@@ -27,6 +30,56 @@ test_pcrel32:
 .Lend_test_pcrel32:
          .size   test_pcrel32, .Lend_test_pcrel32-test_pcrel32
 
+        .globl  named_func
+        .p2align       4, 0x90
+        .type   named_func,@function
+named_func:
+        xorq    %rax, %rax
+.Lend_named_func:
+        .size   named_func, .Lend_named_func-named_func
+
+# Check R_X86_64_PLT32 handling with a call to a local function. This produces a
+# Branch32 edge that is resolved like a regular PCRel32 (no PLT entry created).
+#
+# jitlink-check: decode_operand(test_call_local, 0) = named_func - next_pc(test_call_local)
+        .globl  test_call_local
+        .p2align       4, 0x90
+        .type   test_call_local,@function
+test_call_local:
+        callq   named_func
+.Lend_test_call_local:
+        .size   test_call_local, .Lend_test_call_local-test_call_local
+
+# Check R_X86_64_PLT32 handling with a call to an external. This produces a
+# Branch32ToStub edge, because externals are not defined locally. During
+# resolution, the target turns out to be in-range from the callsite and so the
+# edge is relaxed in post-allocation optimization.
+#
+# jitlink-check: decode_operand(test_call_extern, 0) = extern_in_range32 - next_pc(test_call_extern)
+        .globl  test_call_extern
+        .p2align       4, 0x90
+        .type   test_call_extern,@function
+test_call_extern:
+        callq   extern_in_range32@plt
+.Lend_test_call_extern:
+        .size   test_call_extern, .Lend_test_call_extern-test_call_extern
+
+# Check R_X86_64_PLT32 handling with a call to an external via PLT. This
+# produces a Branch32ToStub edge, because externals are not defined locally.
+# As the target is out-of-range from the callsite, the edge keeps using its PLT
+# entry.
+#
+# jitlink-check: decode_operand(test_call_extern_plt, 0) = \
+# jitlink-check:     stub_addr(elf_reloc.o, extern_out_of_range32) - next_pc(test_call_extern_plt)
+# jitlink-check: *{8}(got_addr(elf_reloc.o, extern_out_of_range32)) = extern_out_of_range32
+        .globl  test_call_extern_plt
+        .p2align       4, 0x90
+        .type   test_call_extern_plt,@function
+test_call_extern_plt:
+        callq   extern_out_of_range32@plt
+.Lend_test_call_extern_plt:
+        .size   test_call_extern_plt, .Lend_test_call_extern_plt-test_call_extern_plt
+
 # Test GOTPCREL handling. We want to check both the offset to the GOT entry and its
 # contents.
 # jitlink-check: decode_operand(test_gotpcrel, 4) = got_addr(elf_reloc.o, named_data) - next_pc(test_gotpcrel)


        

