[llvm] r374476 - [JITLink] Add an initial implementation of JITLink for MachO/AArch64.
Lang Hames via llvm-commits
llvm-commits@lists.llvm.org
Thu Oct 10 16:37:51 PDT 2019
Author: lhames
Date: Thu Oct 10 16:37:51 2019
New Revision: 374476
URL: http://llvm.org/viewvc/llvm-project?rev=374476&view=rev
Log:
[JITLink] Add an initial implementation of JITLink for MachO/AArch64.
This implementation has support for all relocation types except TLV.
Compact unwind sections are not yet supported, so exceptions/unwinding will not
work.
Added:
llvm/trunk/include/llvm/ExecutionEngine/JITLink/MachO_arm64.h
llvm/trunk/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
llvm/trunk/test/ExecutionEngine/JITLink/AArch64/
llvm/trunk/test/ExecutionEngine/JITLink/AArch64/MachO_Arm64_relocations.s
llvm/trunk/test/ExecutionEngine/JITLink/AArch64/lit.local.cfg
Modified:
llvm/trunk/lib/ExecutionEngine/JITLink/CMakeLists.txt
llvm/trunk/lib/ExecutionEngine/JITLink/MachO.cpp
Added: llvm/trunk/include/llvm/ExecutionEngine/JITLink/MachO_arm64.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/ExecutionEngine/JITLink/MachO_arm64.h?rev=374476&view=auto
==============================================================================
--- llvm/trunk/include/llvm/ExecutionEngine/JITLink/MachO_arm64.h (added)
+++ llvm/trunk/include/llvm/ExecutionEngine/JITLink/MachO_arm64.h Thu Oct 10 16:37:51 2019
@@ -0,0 +1,60 @@
+//===---- MachO_arm64.h - JIT link functions for MachO/arm64 ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// jit-link functions for MachO/arm64.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H
+#define LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+namespace llvm {
+namespace jitlink {
+
+namespace MachO_arm64_Edges {
+
+enum MachOARM64RelocationKind : Edge::Kind {
+ Branch26 = Edge::FirstRelocation,
+ Pointer32,
+ Pointer64,
+ Pointer64Anon,
+ Page21,
+ PageOffset12,
+ GOTPage21,
+ GOTPageOffset12,
+ PointerToGOT,
+ PairedAddend,
+ LDRLiteral19,
+ Delta32,
+ Delta64,
+ NegDelta32,
+ NegDelta64,
+};
+
+} // namespace MachO_arm64_Edges
+
+/// jit-link the given object buffer, which must be a MachO arm64 object file.
+///
+/// If PrePrunePasses is empty then a default mark-live pass will be inserted
+/// that will mark all exported atoms live. If PrePrunePasses is not empty, the
+/// caller is responsible for including a pass to mark atoms as live.
+///
+/// If PostPrunePasses is empty then a default GOT-and-stubs insertion pass will
+/// be inserted. If PostPrunePasses is not empty then the caller is responsible
+/// for including a pass to insert GOT and stub edges.
+void jitLink_MachO_arm64(std::unique_ptr<JITLinkContext> Ctx);
+
+/// Return the string name of the given MachO arm64 edge kind.
+StringRef getMachOARM64RelocationKindName(Edge::Kind R);
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H
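The header above documents the pass-list contract for jitLink_MachO_arm64: an empty PrePrunePasses list gets a default mark-live pass, while a non-empty list puts that responsibility on the caller. A minimal sketch of such a caller-supplied pass follows; it is not part of this patch, and it assumes the LinkGraph API at this revision (defined_symbols(), Symbol::getScope(), Symbol::setLive()).

// Hypothetical example, not part of this commit: a pre-prune pass that keeps
// only exported (default-scope) symbols alive.
#include "llvm/ExecutionEngine/JITLink/JITLink.h"

using namespace llvm;
using namespace llvm::jitlink;

static Error markExportedSymbolsLive(LinkGraph &G) {
  for (auto *Sym : G.defined_symbols())
    if (Sym->getScope() == Scope::Default)
      Sym->setLive(true);
  return Error::success();
}

// A JITLinkContext would typically register it from its modifyPassConfig
// override, e.g.: Config.PrePrunePasses.push_back(markExportedSymbolsLive);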
Modified: llvm/trunk/lib/ExecutionEngine/JITLink/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/JITLink/CMakeLists.txt?rev=374476&r1=374475&r2=374476&view=diff
==============================================================================
--- llvm/trunk/lib/ExecutionEngine/JITLink/CMakeLists.txt (original)
+++ llvm/trunk/lib/ExecutionEngine/JITLink/CMakeLists.txt Thu Oct 10 16:37:51 2019
@@ -4,6 +4,7 @@ add_llvm_library(LLVMJITLink
JITLinkMemoryManager.cpp
EHFrameSupport.cpp
MachO.cpp
+ MachO_arm64.cpp
MachO_x86_64.cpp
MachOLinkGraphBuilder.cpp
Modified: llvm/trunk/lib/ExecutionEngine/JITLink/MachO.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/JITLink/MachO.cpp?rev=374476&r1=374475&r2=374476&view=diff
==============================================================================
--- llvm/trunk/lib/ExecutionEngine/JITLink/MachO.cpp (original)
+++ llvm/trunk/lib/ExecutionEngine/JITLink/MachO.cpp Thu Oct 10 16:37:51 2019
@@ -14,6 +14,7 @@
#include "llvm/ExecutionEngine/JITLink/MachO.h"
#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Format.h"
@@ -64,6 +65,8 @@ void jitLink_MachO(std::unique_ptr<JITLi
});
switch (Header.cputype) {
+ case MachO::CPU_TYPE_ARM64:
+ return jitLink_MachO_arm64(std::move(Ctx));
case MachO::CPU_TYPE_X86_64:
return jitLink_MachO_x86_64(std::move(Ctx));
}
Added: llvm/trunk/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/JITLink/MachO_arm64.cpp?rev=374476&view=auto
==============================================================================
--- llvm/trunk/lib/ExecutionEngine/JITLink/MachO_arm64.cpp (added)
+++ llvm/trunk/lib/ExecutionEngine/JITLink/MachO_arm64.cpp Thu Oct 10 16:37:51 2019
@@ -0,0 +1,733 @@
+//===---- MachO_arm64.cpp - JIT linker implementation for MachO/arm64 -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/arm64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+
+#include "BasicGOTAndStubsBuilder.h"
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::MachO_arm64_Edges;
+
+namespace {
+
+class MachOLinkGraphBuilder_arm64 : public MachOLinkGraphBuilder {
+public:
+ MachOLinkGraphBuilder_arm64(const object::MachOObjectFile &Obj)
+ : MachOLinkGraphBuilder(Obj),
+ NumSymbols(Obj.getSymtabLoadCommand().nsyms) {
+ addCustomSectionParser(
+ "__eh_frame", [this](NormalizedSection &EHFrameSection) {
+ if (!EHFrameSection.Data)
+ return make_error<JITLinkError>(
+ "__eh_frame section is marked zero-fill");
+ return MachOEHFrameBinaryParser(
+ *this, EHFrameSection.Address,
+ StringRef(EHFrameSection.Data, EHFrameSection.Size),
+ *EHFrameSection.GraphSection, 8, 4, NegDelta32, Delta64)
+ .addToGraph();
+ });
+ }
+
+private:
+ static Expected<MachOARM64RelocationKind>
+ getRelocationKind(const MachO::relocation_info &RI) {
+ switch (RI.r_type) {
+ case MachO::ARM64_RELOC_UNSIGNED:
+ if (!RI.r_pcrel) {
+ if (RI.r_length == 3)
+ return RI.r_extern ? Pointer64 : Pointer64Anon;
+ else if (RI.r_length == 2)
+ return Pointer32;
+ }
+ break;
+ case MachO::ARM64_RELOC_SUBTRACTOR:
+ // SUBTRACTOR must be non-pc-rel, extern, with length 2 or 3.
+ // Initially represent SUBTRACTOR relocations with 'Delta<W>'.
+ // They may be turned into NegDelta<W> by parsePairRelocation.
+ if (!RI.r_pcrel && RI.r_extern) {
+ if (RI.r_length == 2)
+ return Delta32;
+ else if (RI.r_length == 3)
+ return Delta64;
+ }
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Branch26;
+ break;
+ case MachO::ARM64_RELOC_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Page21;
+ break;
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PageOffset12;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return GOTPage21;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return GOTPageOffset12;
+ break;
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PointerToGOT;
+ break;
+ case MachO::ARM64_RELOC_ADDEND:
+ if (!RI.r_pcrel && !RI.r_extern && RI.r_length == 2)
+ return PairedAddend;
+ break;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported arm64 relocation: address=" +
+ formatv("{0:x8}", RI.r_address) +
+ ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+ ", kind=" + formatv("{0:x1}", RI.r_type) +
+ ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+ ", extern=" + (RI.r_extern ? "true" : "false") +
+ ", length=" + formatv("{0:d}", RI.r_length));
+ }
+
+ MachO::relocation_info
+ getRelocationInfo(const object::relocation_iterator RelItr) {
+ MachO::any_relocation_info ARI =
+ getObject().getRelocation(RelItr->getRawDataRefImpl());
+ MachO::relocation_info RI;
+ memcpy(&RI, &ARI, sizeof(MachO::relocation_info));
+ return RI;
+ }
+
+ using PairRelocInfo =
+ std::tuple<MachOARM64RelocationKind, Symbol *, uint64_t>;
+
+ // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+ // returns the edge kind and addend to be used.
+ Expected<PairRelocInfo>
+ parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
+ const MachO::relocation_info &SubRI,
+ JITTargetAddress FixupAddress, const char *FixupContent,
+ object::relocation_iterator &UnsignedRelItr,
+ object::relocation_iterator &RelEnd) {
+ using namespace support;
+
+ assert(((SubtractorKind == Delta32 && SubRI.r_length == 2) ||
+ (SubtractorKind == Delta64 && SubRI.r_length == 3)) &&
+ "Subtractor kind should match length");
+ assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+ assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+ if (UnsignedRelItr == RelEnd)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR without paired "
+ "UNSIGNED relocation");
+
+ auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+ if (SubRI.r_address != UnsignedRI.r_address)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR and paired UNSIGNED "
+ "point to different addresses");
+
+ if (SubRI.r_length != UnsignedRI.r_length)
+ return make_error<JITLinkError>("length of arm64 SUBTRACTOR and paired "
+ "UNSIGNED reloc must match");
+
+ Symbol *FromSymbol;
+ if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+ FromSymbol = FromSymbolOrErr->GraphSymbol;
+ else
+ return FromSymbolOrErr.takeError();
+
+ // Read the current fixup value.
+ uint64_t FixupValue = 0;
+ if (SubRI.r_length == 3)
+ FixupValue = *(const little64_t *)FixupContent;
+ else
+ FixupValue = *(const little32_t *)FixupContent;
+
+ // Find 'ToSymbol' using symbol number or address, depending on whether the
+ // paired UNSIGNED relocation is extern.
+ Symbol *ToSymbol = nullptr;
+ if (UnsignedRI.r_extern) {
+ // Find target symbol by symbol index.
+ if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+ ToSymbol = ToSymbolOrErr->GraphSymbol;
+ else
+ return ToSymbolOrErr.takeError();
+ } else {
+ if (auto ToSymbolOrErr = findSymbolByAddress(FixupValue))
+ ToSymbol = &*ToSymbolOrErr;
+ else
+ return ToSymbolOrErr.takeError();
+ FixupValue -= ToSymbol->getAddress();
+ }
+
+ MachOARM64RelocationKind DeltaKind;
+ Symbol *TargetSymbol;
+ uint64_t Addend;
+ if (&BlockToFix == &FromSymbol->getAddressable()) {
+ TargetSymbol = ToSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? Delta64 : Delta32;
+ Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+ // FIXME: handle extern 'from'.
+ } else if (&BlockToFix == &ToSymbol->getAddressable()) {
+ TargetSymbol = &*FromSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? NegDelta64 : NegDelta32;
+ Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+ } else {
+ // BlockToFix was neither FromSymbol nor ToSymbol.
+ return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+ "either 'A' or 'B' (or a symbol in one "
+ "of their alt-entry groups)");
+ }
+
+ return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+ }
+
+ Error addRelocations() override {
+ using namespace support;
+ auto &Obj = getObject();
+
+ for (auto &S : Obj.sections()) {
+
+ JITTargetAddress SectionAddress = S.getAddress();
+
+ for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+ RelItr != RelEnd; ++RelItr) {
+
+ MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+ // Sanity check the relocation kind.
+ auto Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ // Find the address of the value to fix up.
+ JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+
+ LLVM_DEBUG({
+ dbgs() << "Processing " << getMachOARM64RelocationKindName(*Kind)
+ << " relocation at " << format("0x%016" PRIx64, FixupAddress)
+ << "\n";
+ });
+
+ // Find the block that the fixup points to.
+ Block *BlockToFix = nullptr;
+ {
+ auto SymbolToFixOrErr = findSymbolByAddress(FixupAddress);
+ if (!SymbolToFixOrErr)
+ return SymbolToFixOrErr.takeError();
+ BlockToFix = &SymbolToFixOrErr->getBlock();
+ }
+
+ if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
+ BlockToFix->getAddress() + BlockToFix->getContent().size())
+ return make_error<JITLinkError>(
+ "Relocation content extends past end of fixup block");
+
+ // Get a pointer to the fixup content.
+ const char *FixupContent = BlockToFix->getContent().data() +
+ (FixupAddress - BlockToFix->getAddress());
+
+ // The target symbol and addend will be populated by the switch below.
+ Symbol *TargetSymbol = nullptr;
+ uint64_t Addend = 0;
+
+ if (*Kind == PairedAddend) {
+ // If this is an Addend relocation then process it and move to the
+ // paired reloc.
+
+ Addend = RI.r_symbolnum;
+
+          ++RelItr;
+          if (RelItr == RelEnd)
+            return make_error<JITLinkError>("Unpaired Addend reloc at " +
+                                            formatv("{0:x16}", FixupAddress));
+ RI = getRelocationInfo(RelItr);
+
+ Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+          if (*Kind != Branch26 && *Kind != Page21 && *Kind != PageOffset12)
+ return make_error<JITLinkError>(
+ "Invalid relocation pair: Addend + " +
+ getMachOARM64RelocationKindName(*Kind));
+ else
+ LLVM_DEBUG({
+            dbgs() << "  pair is " << getMachOARM64RelocationKindName(*Kind)
+                   << "\n";
+ });
+
+ // Find the address of the value to fix up.
+ JITTargetAddress PairedFixupAddress =
+ SectionAddress + (uint32_t)RI.r_address;
+ if (PairedFixupAddress != FixupAddress)
+ return make_error<JITLinkError>("Paired relocation points at "
+ "different target");
+ }
+
+ switch (*Kind) {
+ case Branch26: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0x7fffffff) != 0x14000000)
+ return make_error<JITLinkError>("BRANCH26 target is not a B or BL "
+ "instruction with a zero addend");
+ break;
+ }
+ case Pointer32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ break;
+ case Pointer64:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle64_t *)FixupContent;
+ break;
+ case Pointer64Anon: {
+ JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case Page21:
+ case GOTPage21: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xffffffe0) != 0x90000000)
+ return make_error<JITLinkError>("PAGE21/GOTPAGE21 target is not an "
+ "ADRP instruction with a zero "
+ "addend");
+ break;
+ }
+ case PageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ break;
+ }
+ case GOTPageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xfffffc00) != 0xf9400000)
+ return make_error<JITLinkError>("GOTPAGEOFF12 target is not an LDR "
+ "immediate instruction with a zero "
+ "addend");
+ break;
+ }
+ case PointerToGOT:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ break;
+ case Delta32:
+ case Delta64: {
+ // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+ // parsePairRelocation handles the paired reloc, and returns the
+ // edge kind to be used (either Delta32/Delta64, or
+ // NegDelta32/NegDelta64, depending on the direction of the
+ // subtraction) along with the addend.
+ auto PairInfo =
+ parsePairRelocation(*BlockToFix, *Kind, RI, FixupAddress,
+ FixupContent, ++RelItr, RelEnd);
+ if (!PairInfo)
+ return PairInfo.takeError();
+ std::tie(*Kind, TargetSymbol, Addend) = *PairInfo;
+ assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+ break;
+ }
+ default:
+ llvm_unreachable("Special relocation kind should not appear in "
+ "mach-o file");
+ }
+
+ LLVM_DEBUG({
+ Edge GE(*Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE,
+ getMachOARM64RelocationKindName(*Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(*Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+
+ unsigned NumSymbols = 0;
+};
+
+class MachO_arm64_GOTAndStubsBuilder
+ : public BasicGOTAndStubsBuilder<MachO_arm64_GOTAndStubsBuilder> {
+public:
+ MachO_arm64_GOTAndStubsBuilder(LinkGraph &G)
+ : BasicGOTAndStubsBuilder<MachO_arm64_GOTAndStubsBuilder>(G) {}
+
+ bool isGOTEdge(Edge &E) const {
+ return E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12 ||
+ E.getKind() == PointerToGOT;
+ }
+
+ Symbol &createGOTEntry(Symbol &Target) {
+ auto &GOTEntryBlock = G.createContentBlock(
+ getGOTSection(), getGOTEntryBlockContent(), 0, 8, 0);
+ GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
+ return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
+ }
+
+ void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+ if (E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12) {
+ // Update the target, but leave the edge addend as-is.
+ E.setTarget(GOTEntry);
+ } else if (E.getKind() == PointerToGOT) {
+ E.setTarget(GOTEntry);
+ E.setKind(Delta32);
+ } else
+ llvm_unreachable("Not a GOT edge?");
+ }
+
+ bool isExternalBranchEdge(Edge &E) {
+ return E.getKind() == Branch26 && !E.getTarget().isDefined();
+ }
+
+ Symbol &createStub(Symbol &Target) {
+ auto &StubContentBlock =
+ G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 1, 0);
+ // Re-use GOT entries for stub targets.
+ auto &GOTEntrySymbol = getGOTEntrySymbol(Target);
+ StubContentBlock.addEdge(LDRLiteral19, 0, GOTEntrySymbol, 0);
+ return G.addAnonymousSymbol(StubContentBlock, 0, 8, true, false);
+ }
+
+ void fixExternalBranchEdge(Edge &E, Symbol &Stub) {
+    assert(E.getKind() == Branch26 && "Not a Branch26 edge?");
+    assert(E.getAddend() == 0 && "Branch26 edge has non-zero addend?");
+ E.setTarget(Stub);
+ }
+
+private:
+ Section &getGOTSection() {
+ if (!GOTSection)
+ GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
+ return *GOTSection;
+ }
+
+ Section &getStubsSection() {
+ if (!StubsSection) {
+ auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ StubsSection = &G.createSection("$__STUBS", StubsProt);
+ }
+ return *StubsSection;
+ }
+
+ StringRef getGOTEntryBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(NullGOTEntryContent),
+ sizeof(NullGOTEntryContent));
+ }
+
+ StringRef getStubBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(StubContent),
+ sizeof(StubContent));
+ }
+
+ static const uint8_t NullGOTEntryContent[8];
+ static const uint8_t StubContent[8];
+ Section *GOTSection = nullptr;
+ Section *StubsSection = nullptr;
+};
+
+const uint8_t MachO_arm64_GOTAndStubsBuilder::NullGOTEntryContent[8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+const uint8_t MachO_arm64_GOTAndStubsBuilder::StubContent[8] = {
+ 0x10, 0x00, 0x00, 0x58, // LDR x16, <literal>
+ 0x00, 0x02, 0x1f, 0xd6 // BR x16
+};
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+class MachOJITLinker_arm64 : public JITLinker<MachOJITLinker_arm64> {
+ friend class JITLinker<MachOJITLinker_arm64>;
+
+public:
+ MachOJITLinker_arm64(std::unique_ptr<JITLinkContext> Ctx,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(PassConfig)) {}
+
+private:
+ StringRef getEdgeKindName(Edge::Kind R) const override {
+ return getMachOARM64RelocationKindName(R);
+ }
+
+ Expected<std::unique_ptr<LinkGraph>>
+ buildGraph(MemoryBufferRef ObjBuffer) override {
+ auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjBuffer);
+ if (!MachOObj)
+ return MachOObj.takeError();
+ return MachOLinkGraphBuilder_arm64(**MachOObj).buildGraph();
+ }
+
+ static Error targetOutOfRangeError(const Block &B, const Edge &E) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Relocation target out of range: ";
+ printEdge(ErrStream, B, E, getMachOARM64RelocationKindName(E.getKind()));
+ ErrStream << "\n";
+ }
+ return make_error<JITLinkError>(std::move(ErrMsg));
+ }
+
+ static unsigned getPageOffset12Shift(uint32_t Instr) {
+ constexpr uint32_t LDRLiteralMask = 0x3ffffc00;
+
+ // Check for a GPR LDR immediate with a zero embedded literal.
+ // If found, the top two bits contain the shift.
+ if ((Instr & LDRLiteralMask) == 0x39400000)
+ return Instr >> 30;
+
+ // Check for a Neon LDR immediate of size 64-bit or less with a zero
+ // embedded literal. If found, the top two bits contain the shift.
+ if ((Instr & LDRLiteralMask) == 0x3d400000)
+ return Instr >> 30;
+
+ // Check for a Neon LDR immediate of size 128-bit with a zero embedded
+ // literal.
+ constexpr uint32_t SizeBitsMask = 0xc0000000;
+ if ((Instr & (LDRLiteralMask | SizeBitsMask)) == 0x3dc00000)
+ return 4;
+
+ return 0;
+ }
+
+ Error applyFixup(Block &B, const Edge &E, char *BlockWorkingMem) const {
+ using namespace support;
+
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+
+ switch (E.getKind()) {
+ case Branch26: {
+ assert((FixupAddress & 0x3) == 0 && "Branch-inst is not 32-bit aligned");
+
+ int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+
+ if (static_cast<uint64_t>(Value) & 0x3)
+ return make_error<JITLinkError>("Branch26 target is not 32-bit "
+ "aligned");
+
+ if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
+ return targetOutOfRangeError(B, E);
+
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+      assert((RawInstr & 0x7fffffff) == 0x14000000 &&
+             "RawInstr isn't a B or BL instruction with a zero addend");
+ uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
+ uint32_t FixedInstr = RawInstr | Imm;
+ *(little32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case Pointer32: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ if (Value > std::numeric_limits<uint32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(ulittle32_t *)FixupPtr = Value;
+ break;
+ }
+ case Pointer64: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ *(ulittle64_t *)FixupPtr = Value;
+ break;
+ }
+ case Page21:
+ case GOTPage21: {
+ assert(E.getAddend() == 0 && "PAGE21/GOTPAGE21 with non-zero addend");
+ uint64_t TargetPage =
+ E.getTarget().getAddress() & ~static_cast<uint64_t>(4096 - 1);
+ uint64_t PCPage = B.getAddress() & ~static_cast<uint64_t>(4096 - 1);
+
+ int64_t PageDelta = TargetPage - PCPage;
+ if (PageDelta < -(1 << 30) || PageDelta > ((1 << 30) - 1))
+ return targetOutOfRangeError(B, E);
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert((RawInstr & 0xffffffe0) == 0x90000000 &&
+ "RawInstr isn't an ADRP instruction");
+ uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
+ uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
+ uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case PageOffset12: {
+ assert(E.getAddend() == 0 && "PAGEOFF12 with non-zero addend");
+ uint64_t TargetOffset = E.getTarget().getAddress() & 0xfff;
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ unsigned ImmShift = getPageOffset12Shift(RawInstr);
+
+ if (TargetOffset & ((1 << ImmShift) - 1))
+ return make_error<JITLinkError>("PAGEOFF12 target is not aligned");
+
+ uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case GOTPageOffset12: {
+      assert(E.getAddend() == 0 && "GOTPAGEOFF12 with non-zero addend");
+ uint64_t TargetOffset = E.getTarget().getAddress() & 0xfff;
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
+ "RawInstr isn't a 64-bit LDR immediate");
+ uint32_t FixedInstr = RawInstr | (TargetOffset << 10);
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case LDRLiteral19: {
+ assert((FixupAddress & 0x3) == 0 && "LDR is not 32-bit aligned");
+ assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
+ int64_t Delta = E.getTarget().getAddress() - FixupAddress;
+ if (Delta & 0x3)
+ return make_error<JITLinkError>("LDR literal target is not 32-bit "
+ "aligned");
+ if (Delta < -(1 << 20) || Delta > ((1 << 20) - 1))
+ return targetOutOfRangeError(B, E);
+
+ uint32_t EncodedImm = (static_cast<uint32_t>(Delta) >> 2) << 5;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case Delta32:
+ case Delta64:
+ case NegDelta32:
+ case NegDelta64: {
+ int64_t Value;
+ if (E.getKind() == Delta32 || E.getKind() == Delta64)
+ Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+ else
+ Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
+
+ if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ } else
+ *(little64_t *)FixupPtr = Value;
+ break;
+ }
+ default:
+ llvm_unreachable("Unrecognized edge kind");
+ }
+
+ return Error::success();
+ }
+
+ uint64_t NullValue = 0;
+};
+
+void jitLink_MachO_arm64(std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+ Triple TT("arm64-apple-ios");
+
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back([](LinkGraph &G) -> Error {
+ MachO_arm64_GOTAndStubsBuilder(G).run();
+ return Error::success();
+ });
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(TT, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Construct a JITLinker and run the link function.
+ MachOJITLinker_arm64::link(std::move(Ctx), std::move(Config));
+}
+
+StringRef getMachOARM64RelocationKindName(Edge::Kind R) {
+ switch (R) {
+  case Branch26:
+    return "Branch26";
+  case Pointer32:
+    return "Pointer32";
+ case Pointer64:
+ return "Pointer64";
+ case Pointer64Anon:
+ return "Pointer64Anon";
+ case Page21:
+ return "Page21";
+ case PageOffset12:
+ return "PageOffset12";
+ case GOTPage21:
+ return "GOTPage21";
+ case GOTPageOffset12:
+ return "GOTPageOffset12";
+ case PointerToGOT:
+ return "PointerToGOT";
+ case PairedAddend:
+ return "PairedAddend";
+ case LDRLiteral19:
+ return "LDRLiteral19";
+ case Delta32:
+ return "Delta32";
+ case Delta64:
+ return "Delta64";
+ case NegDelta32:
+ return "NegDelta32";
+ case NegDelta64:
+ return "NegDelta64";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
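A standalone worked example of the Page21 / PageOffset12 fixup arithmetic implemented in applyFixup above. The addresses are made up for illustration; the bit manipulation mirrors the ADRP immhi:immlo split and the scaled imm12 encoding used by the linker.

// Hypothetical worked example of the ADRP/LDR fixup arithmetic above.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t FixupAddress = 0x100000000;  // address of the ADRP (made up)
  uint64_t TargetAddress = 0x100004ab8; // address being materialized (made up)

  // Page21: the 21-bit page delta is split into immlo (2 bits, at bit 29)
  // and immhi (19 bits, at bit 5) of the ADRP instruction.
  uint32_t ADRP = 0x90000000; // ADRP x0, #0
  uint64_t TargetPage = TargetAddress & ~uint64_t(4096 - 1);
  uint64_t PCPage = FixupAddress & ~uint64_t(4096 - 1);
  int64_t PageDelta = TargetPage - PCPage; // 0x4000 (four pages) here
  uint32_t ImmLo = (uint64_t(PageDelta) >> 12) & 0x3;
  uint32_t ImmHi = (uint64_t(PageDelta) >> 14) & 0x7ffff;
  uint32_t FixedADRP = ADRP | (ImmLo << 29) | (ImmHi << 5);
  assert(FixedADRP == 0x90000020 && "expect ADRP x0, #+4 pages");

  // PageOffset12: the low 12 bits of the target, scaled by the access size
  // (shift 3 for a 64-bit LDR), land in the imm12 field at bit 10.
  uint32_t LDRX = 0xf9400000; // LDR x0, [x0, #0]
  unsigned ImmShift = 3;
  uint64_t TargetOffset = TargetAddress & 0xfff; // 0xab8
  assert((TargetOffset & ((1u << ImmShift) - 1)) == 0 && "8-byte aligned");
  uint32_t FixedLDR = LDRX | uint32_t((TargetOffset >> ImmShift) << 10);
  assert(FixedLDR == 0xf9455c00 && "expect LDR x0, [x0, #0xab8]");
  (void)FixedADRP;
  (void)FixedLDR;
  return 0;
}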
Added: llvm/trunk/test/ExecutionEngine/JITLink/AArch64/MachO_Arm64_relocations.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/ExecutionEngine/JITLink/AArch64/MachO_Arm64_relocations.s?rev=374476&view=auto
==============================================================================
--- llvm/trunk/test/ExecutionEngine/JITLink/AArch64/MachO_Arm64_relocations.s (added)
+++ llvm/trunk/test/ExecutionEngine/JITLink/AArch64/MachO_Arm64_relocations.s Thu Oct 10 16:37:51 2019
@@ -0,0 +1,339 @@
+# RUN: rm -rf %t && mkdir -p %t
+# RUN: llvm-mc -triple=arm64-apple-darwin19 -filetype=obj -o %t/macho_reloc.o %s
+# RUN: llvm-jitlink -noexec -define-abs external_data=0xdeadbeef -define-abs external_func=0xcafef00d -check=%s %t/macho_reloc.o
+
+ .section __TEXT,__text,regular,pure_instructions
+
+ .p2align 2
+Lanon_func:
+ ret
+
+ .globl named_func
+ .p2align 2
+named_func:
+ ret
+
+# Check ARM64_RELOC_BRANCH26 handling with a call to a local function.
+# The branch instruction only encodes 26 bits of the 28-bit possible branch
+# range, since the low 2 bits will always be zero.
+#
+# jitlink-check: decode_operand(test_local_call, 0)[25:0] = (named_func - test_local_call)[27:2]
+ .globl test_local_call
+ .p2align 2
+test_local_call:
+ bl named_func
+
+ .globl _main
+ .p2align 2
+_main:
+ ret
+
+# Check ARM64_RELOC_GOTPAGE21 / ARM64_RELOC_GOTPAGEOFF12 handling with a
+# reference to an external symbol. Validate both the reference to the GOT entry,
+# and also the content of the GOT entry.
+#
+# For the GOTPAGE21/ADRP instruction we have the 21-bit delta to the 4k page
+# containing the GOT entry for external_data.
+#
+# For the GOTPAGEOFF/LDR instruction we have the 12-bit offset of the entry
+# within the page.
+#
+# jitlink-check: *{8}(got_addr(macho_reloc.o, external_data)) = external_data
+# jitlink-check: decode_operand(test_gotpage21, 1) = (got_addr(macho_reloc.o, external_data)[32:12] - test_gotpage21[32:12])
+# jitlink-check: decode_operand(test_gotpageoff12, 2) = got_addr(macho_reloc.o, external_data)[11:3]
+ .globl test_gotpage21
+ .p2align 2
+test_gotpage21:
+ adrp x0, external_data@GOTPAGE
+ .globl test_gotpageoff12
+test_gotpageoff12:
+ ldr x0, [x0, external_data@GOTPAGEOFF]
+
+# Check ARM64_RELOC_PAGE21 / ARM64_RELOC_PAGEOFF12 handling with a reference to
+# a local symbol.
+#
+# For the PAGE21/ADRP instruction we have the 21-bit delta to the 4k page
+# containing the global.
+#
+# For the PAGEOFF12 relocation we test the ADD instruction, all LDR/GPR
+# variants and all LDR/Neon variants.
+#
+# jitlink-check: decode_operand(test_page21, 1) = (named_data[32:12] - test_page21[32:12])
+# jitlink-check: decode_operand(test_pageoff12add, 2) = named_data[11:0]
+# jitlink-check: decode_operand(test_pageoff12gpr8, 2) = named_data[11:0]
+# jitlink-check: decode_operand(test_pageoff12gpr16, 2) = named_data[11:1]
+# jitlink-check: decode_operand(test_pageoff12gpr32, 2) = named_data[11:2]
+# jitlink-check: decode_operand(test_pageoff12gpr64, 2) = named_data[11:3]
+# jitlink-check: decode_operand(test_pageoff12neon8, 2) = named_data[11:0]
+# jitlink-check: decode_operand(test_pageoff12neon16, 2) = named_data[11:1]
+# jitlink-check: decode_operand(test_pageoff12neon32, 2) = named_data[11:2]
+# jitlink-check: decode_operand(test_pageoff12neon64, 2) = named_data[11:3]
+# jitlink-check: decode_operand(test_pageoff12neon128, 2) = named_data[11:4]
+ .globl test_page21
+ .p2align 2
+test_page21:
+ adrp x0, named_data@PAGE
+
+ .globl test_pageoff12add
+test_pageoff12add:
+ add x0, x0, named_data@PAGEOFF
+
+ .globl test_pageoff12gpr8
+test_pageoff12gpr8:
+ ldrb w0, [x0, named_data@PAGEOFF]
+
+ .globl test_pageoff12gpr16
+test_pageoff12gpr16:
+ ldrh w0, [x0, named_data@PAGEOFF]
+
+ .globl test_pageoff12gpr32
+test_pageoff12gpr32:
+ ldr w0, [x0, named_data@PAGEOFF]
+
+ .globl test_pageoff12gpr64
+test_pageoff12gpr64:
+ ldr x0, [x0, named_data@PAGEOFF]
+
+ .globl test_pageoff12neon8
+test_pageoff12neon8:
+ ldr b0, [x0, named_data@PAGEOFF]
+
+ .globl test_pageoff12neon16
+test_pageoff12neon16:
+ ldr h0, [x0, named_data@PAGEOFF]
+
+ .globl test_pageoff12neon32
+test_pageoff12neon32:
+ ldr s0, [x0, named_data@PAGEOFF]
+
+ .globl test_pageoff12neon64
+test_pageoff12neon64:
+ ldr d0, [x0, named_data@PAGEOFF]
+
+ .globl test_pageoff12neon128
+test_pageoff12neon128:
+ ldr q0, [x0, named_data@PAGEOFF]
+
+# Check that calls to external functions trigger the generation of stubs and GOT
+# entries.
+#
+# jitlink-check: decode_operand(test_external_call, 0) = (stub_addr(macho_reloc.o, external_func) - test_external_call)[27:2]
+# jitlink-check: *{8}(got_addr(macho_reloc.o, external_func)) = external_func
+ .globl test_external_call
+ .p2align 2
+test_external_call:
+ bl external_func
+
+ .section __DATA,__data
+
+# Storage target for non-extern ARM64_RELOC_SUBTRACTOR relocs.
+ .p2align 3
+Lanon_data:
+ .quad 0x1111111111111111
+
+# Check ARM64_RELOC_SUBTRACTOR Quad/Long in anonymous storage with anonymous
+# minuend: "LA: .quad LA - B + C". The anonymous subtrahend form
+# "LA: .quad B - LA + C" is not tested as subtrahends are not permitted to be
+# anonymous.
+#
+# Note: +8 offset in expression below to account for sizeof(Lanon_data).
+# jitlink-check: *{8}(section_addr(macho_reloc.o, __data) + 8) = (section_addr(macho_reloc.o, __data) + 8) - named_data + 2
+ .p2align 3
+Lanon_minuend_quad:
+ .quad Lanon_minuend_quad - named_data + 2
+
+# Note: +16 offset in expression below to account for sizeof(Lanon_data) + sizeof(Lanon_minuend_long).
+# jitlink-check: *{4}(section_addr(macho_reloc.o, __data) + 16) = ((section_addr(macho_reloc.o, __data) + 16) - named_data + 2)[31:0]
+ .p2align 2
+Lanon_minuend_long:
+ .long Lanon_minuend_long - named_data + 2
+
+# Named quad storage target (first named atom in __data).
+# Align to 16 for use as 128-bit load target.
+ .globl named_data
+ .p2align 4
+named_data:
+ .quad 0x2222222222222222
+ .quad 0x3333333333333333
+
+# An alt-entry point for named_data
+ .globl named_data_alt_entry
+ .p2align 3
+ .alt_entry named_data_alt_entry
+named_data_alt_entry:
+ .quad 0
+
+# Check ARM64_RELOC_UNSIGNED / quad / extern handling by putting the address of
+# a local named function into a quad symbol.
+#
+# jitlink-check: *{8}named_func_addr_quad = named_func
+ .globl named_func_addr_quad
+ .p2align 3
+named_func_addr_quad:
+ .quad named_func
+
+# Check ARM64_RELOC_UNSIGNED / quad / non-extern handling by putting the
+# address of a local anonymous function into a quad symbol.
+#
+# jitlink-check: *{8}anon_func_addr_quad = section_addr(macho_reloc.o, __text)
+ .globl anon_func_addr_quad
+ .p2align 3
+anon_func_addr_quad:
+ .quad Lanon_func
+
+# ARM64_RELOC_SUBTRACTOR Quad/Long in named storage with anonymous minuend
+#
+# jitlink-check: *{8}anon_minuend_quad1 = section_addr(macho_reloc.o, __data) - anon_minuend_quad1 + 2
+# Only the form "B: .quad LA - B + C" is tested. The form "B: .quad B - LA + C" is
+# invalid because the subtrahend cannot be local.
+ .globl anon_minuend_quad1
+ .p2align 3
+anon_minuend_quad1:
+ .quad Lanon_data - anon_minuend_quad1 + 2
+
+# jitlink-check: *{4}anon_minuend_long1 = (section_addr(macho_reloc.o, __data) - anon_minuend_long1 + 2)[31:0]
+ .globl anon_minuend_long1
+ .p2align 2
+anon_minuend_long1:
+ .long Lanon_data - anon_minuend_long1 + 2
+
+# Check ARM64_RELOC_SUBTRACTOR Quad/Long in named storage with minuend and subtrahend.
+# Both forms "A: .quad A - B + C" and "A: .quad B - A + C" are tested.
+#
+# Check "A: .quad B - A + C".
+# jitlink-check: *{8}subtrahend_quad2 = (named_data - subtrahend_quad2 - 2)
+ .globl subtrahend_quad2
+ .p2align 3
+subtrahend_quad2:
+ .quad named_data - subtrahend_quad2 - 2
+
+# Check "A: .long B - A + C".
+# jitlink-check: *{4}subtrahend_long2 = (named_data - subtrahend_long2 - 2)[31:0]
+ .globl subtrahend_long2
+ .p2align 2
+subtrahend_long2:
+ .long named_data - subtrahend_long2 - 2
+
+# Check "A: .quad A - B + C".
+# jitlink-check: *{8}minuend_quad3 = (minuend_quad3 - named_data - 2)
+ .globl minuend_quad3
+ .p2align 3
+minuend_quad3:
+ .quad minuend_quad3 - named_data - 2
+
+# Check "A: .long A - B + C".
+# jitlink-check: *{4}minuend_long3 = (minuend_long3 - named_data - 2)[31:0]
+ .globl minuend_long3
+ .p2align 2
+minuend_long3:
+ .long minuend_long3 - named_data - 2
+
+# Check ARM64_RELOC_SUBTRACTOR handling for exprs of the form
+# "A: .quad/long B - C + D", where 'B' or 'C' is at a fixed offset from 'A'
+# (i.e. is part of an alt_entry chain that includes 'A').
+#
+# Check "A: .long B - C + D" where 'B' is an alt_entry for 'A'.
+# jitlink-check: *{4}subtractor_with_alt_entry_minuend_long = (subtractor_with_alt_entry_minuend_long_B - named_data + 2)[31:0]
+ .globl subtractor_with_alt_entry_minuend_long
+ .p2align 2
+subtractor_with_alt_entry_minuend_long:
+ .long subtractor_with_alt_entry_minuend_long_B - named_data + 2
+
+ .globl subtractor_with_alt_entry_minuend_long_B
+ .p2align 2
+ .alt_entry subtractor_with_alt_entry_minuend_long_B
+subtractor_with_alt_entry_minuend_long_B:
+ .long 0
+
+# Check "A: .quad B - C + D" where 'B' is an alt_entry for 'A'.
+# jitlink-check: *{8}subtractor_with_alt_entry_minuend_quad = (subtractor_with_alt_entry_minuend_quad_B - named_data + 2)
+ .globl subtractor_with_alt_entry_minuend_quad
+ .p2align 3
+subtractor_with_alt_entry_minuend_quad:
+ .quad subtractor_with_alt_entry_minuend_quad_B - named_data + 2
+
+ .globl subtractor_with_alt_entry_minuend_quad_B
+ .p2align 3
+ .alt_entry subtractor_with_alt_entry_minuend_quad_B
+subtractor_with_alt_entry_minuend_quad_B:
+ .quad 0
+
+# Check "A: .long B - C + D" where 'C' is an alt_entry for 'A'.
+# jitlink-check: *{4}subtractor_with_alt_entry_subtrahend_long = (named_data - subtractor_with_alt_entry_subtrahend_long_B + 2)[31:0]
+ .globl subtractor_with_alt_entry_subtrahend_long
+ .p2align 2
+subtractor_with_alt_entry_subtrahend_long:
+ .long named_data - subtractor_with_alt_entry_subtrahend_long_B + 2
+
+ .globl subtractor_with_alt_entry_subtrahend_long_B
+ .p2align 2
+ .alt_entry subtractor_with_alt_entry_subtrahend_long_B
+subtractor_with_alt_entry_subtrahend_long_B:
+ .long 0
+
+# Check "A: .quad B - C + D" where 'C' is an alt_entry for 'A'.
+# jitlink-check: *{8}subtractor_with_alt_entry_subtrahend_quad = (named_data - subtractor_with_alt_entry_subtrahend_quad_B + 2)
+ .globl subtractor_with_alt_entry_subtrahend_quad
+ .p2align 3
+subtractor_with_alt_entry_subtrahend_quad:
+ .quad named_data - subtractor_with_alt_entry_subtrahend_quad_B + 2
+
+ .globl subtractor_with_alt_entry_subtrahend_quad_B
+ .p2align 3
+ .alt_entry subtractor_with_alt_entry_subtrahend_quad_B
+subtractor_with_alt_entry_subtrahend_quad_B:
+ .quad 0
+
+# Check ARM64_POINTER_TO_GOT handling.
+# ARM64_POINTER_TO_GOT is a delta-32 to a GOT entry.
+#
+# jitlink-check: *{4}test_got = (got_addr(macho_reloc.o, external_data) - test_got)[31:0]
+ .globl test_got
+ .p2align 2
+test_got:
+ .long external_data@got - .
+
+# Check that unreferenced atoms in no-dead-strip sections are not dead stripped.
+# We need to use a local symbol for this as any named symbol will end up in the
+# ORC responsibility set, which is automatically marked live and would cause
+# spurious passes.
+#
+# jitlink-check: *{8}section_addr(macho_reloc.o, __nds_test_sect) = 0
+ .section __DATA,__nds_test_sect,regular,no_dead_strip
+ .quad 0
+
+# Check that unreferenced local symbols that have been marked no-dead-strip are
+# not dead-stripped.
+#
+# jitlink-check: *{8}section_addr(macho_reloc.o, __nds_test_nlst) = 0
+ .section __DATA,__nds_test_nlst,regular
+ .no_dead_strip no_dead_strip_test_symbol
+no_dead_strip_test_symbol:
+ .quad 0
+
+# Check that explicit zero-fill symbols are supported
+# jitlink-check: *{8}zero_fill_test = 0
+ .globl zero_fill_test
+.zerofill __DATA,__zero_fill_test,zero_fill_test,8,3
+
+# Check that section alignments are respected.
+# We test this by introducing two sections with alignment 8, each containing one
+# byte of data. We require both symbols to have an aligned address.
+#
+# jitlink-check: section_alignment_check1[2:0] = 0
+# jitlink-check: section_alignment_check2[2:0] = 0
+ .section __DATA,__sec_align_chk1
+ .p2align 3
+
+ .globl section_alignment_check1
+section_alignment_check1:
+ .byte 0
+
+ .section __DATA,__sec_align_chk2
+ .p2align 3
+
+ .globl section_alignment_check2
+section_alignment_check2:
+ .byte 0
+
+.subsections_via_symbols
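As a worked illustration of the BRANCH26 check near the top of this test (the addresses below are hypothetical, not taken from the object file): the 26-bit branch operand holds bits [27:2] of the byte delta, so a branch back by one instruction encodes as all-ones.

// Hypothetical numbers illustrating:
//   decode_operand(test_local_call, 0)[25:0] = (named_func - test_local_call)[27:2]
#include <cassert>
#include <cstdint>

int main() {
  uint64_t test_local_call = 0x100000008; // made-up layout
  uint64_t named_func = 0x100000004;      // one instruction earlier
  int64_t Delta = (int64_t)(named_func - test_local_call);    // -4 bytes
  uint32_t Imm26 = ((uint32_t)Delta & ((1u << 28) - 1)) >> 2; // bits [27:2]
  assert(Imm26 == 0x3ffffff && "two's complement -1 in 26 bits");
  (void)Imm26;
  return 0;
}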
Added: llvm/trunk/test/ExecutionEngine/JITLink/AArch64/lit.local.cfg
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/ExecutionEngine/JITLink/AArch64/lit.local.cfg?rev=374476&view=auto
==============================================================================
--- llvm/trunk/test/ExecutionEngine/JITLink/AArch64/lit.local.cfg (added)
+++ llvm/trunk/test/ExecutionEngine/JITLink/AArch64/lit.local.cfg Thu Oct 10 16:37:51 2019
@@ -0,0 +1,2 @@
+if not 'AArch64' in config.root.targets:
+ config.unsupported = True