[lld] r305565 - Split Target.cpp into small files.

Rui Ueyama via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 16 10:32:44 PDT 2017


Author: ruiu
Date: Fri Jun 16 12:32:43 2017
New Revision: 305565

URL: http://llvm.org/viewvc/llvm-project?rev=305565&view=rev
Log:
Split Target.cpp into small files.

Target.cpp contains code for all the targets that LLD supports. It was
simple and easy, but as the number of supported targets increased,
it got messy.

This patch splits the file into per-target files under the ELF/Arch directory.

Differential Revision: https://reviews.llvm.org/D34222

Added:
    lld/trunk/ELF/Arch/
    lld/trunk/ELF/Arch/AArch64.cpp
    lld/trunk/ELF/Arch/AMDGPU.cpp
    lld/trunk/ELF/Arch/ARM.cpp
    lld/trunk/ELF/Arch/AVR.cpp
    lld/trunk/ELF/Arch/Mips.cpp
    lld/trunk/ELF/Arch/PPC.cpp
    lld/trunk/ELF/Arch/PPC64.cpp
    lld/trunk/ELF/Arch/X86.cpp
    lld/trunk/ELF/Arch/X86_64.cpp
Modified:
    lld/trunk/ELF/CMakeLists.txt
    lld/trunk/ELF/Target.cpp
    lld/trunk/ELF/Target.h

Added: lld/trunk/ELF/Arch/AArch64.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/AArch64.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/AArch64.cpp (added)
+++ lld/trunk/ELF/Arch/AArch64.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,374 @@
+//===- AArch64.cpp --------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "Memory.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+#include "Thunks.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+// Page(Expr) is the page address of the expression Expr, defined
+// as (Expr & ~0xFFF). (This applies even if the machine page size
+// supported by the platform has a different value.)
+uint64_t elf::getAArch64Page(uint64_t Expr) {
+  return Expr & ~static_cast<uint64_t>(0xFFF);
+}
+
+namespace {
+class AArch64 final : public TargetInfo {
+public:
+  AArch64();
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+  bool isPicRel(uint32_t Type) const override;
+  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
+  void writePltHeader(uint8_t *Buf) const override;
+  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
+                int32_t Index, unsigned RelOff) const override;
+  bool usesOnlyLowPageBits(uint32_t Type) const override;
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
+                          RelExpr Expr) const override;
+  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+};
+} // namespace
+
+AArch64::AArch64() {
+  CopyRel = R_AARCH64_COPY;
+  RelativeRel = R_AARCH64_RELATIVE;
+  IRelativeRel = R_AARCH64_IRELATIVE;
+  GotRel = R_AARCH64_GLOB_DAT;
+  PltRel = R_AARCH64_JUMP_SLOT;
+  TlsDescRel = R_AARCH64_TLSDESC;
+  TlsGotRel = R_AARCH64_TLS_TPREL64;
+  GotEntrySize = 8;
+  GotPltEntrySize = 8;
+  PltEntrySize = 16;
+  PltHeaderSize = 32;
+  DefaultMaxPageSize = 65536;
+
+  // It doesn't seem to be documented anywhere, but TLS on AArch64 uses
+  // variant 1 of the TLS data structures, and the TCB size is 16 bytes.
+  TcbSize = 16;
+}
+
+RelExpr AArch64::getRelExpr(uint32_t Type, const SymbolBody &S,
+                            const uint8_t *Loc) const {
+  switch (Type) {
+  default:
+    return R_ABS;
+  case R_AARCH64_TLSDESC_ADR_PAGE21:
+    return R_TLSDESC_PAGE;
+  case R_AARCH64_TLSDESC_LD64_LO12:
+  case R_AARCH64_TLSDESC_ADD_LO12:
+    return R_TLSDESC;
+  case R_AARCH64_TLSDESC_CALL:
+    return R_TLSDESC_CALL;
+  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
+  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+    return R_TLS;
+  case R_AARCH64_CALL26:
+  case R_AARCH64_CONDBR19:
+  case R_AARCH64_JUMP26:
+  case R_AARCH64_TSTBR14:
+    return R_PLT_PC;
+  case R_AARCH64_PREL16:
+  case R_AARCH64_PREL32:
+  case R_AARCH64_PREL64:
+  case R_AARCH64_ADR_PREL_LO21:
+    return R_PC;
+  case R_AARCH64_ADR_PREL_PG_HI21:
+    return R_PAGE_PC;
+  case R_AARCH64_LD64_GOT_LO12_NC:
+  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+    return R_GOT;
+  case R_AARCH64_ADR_GOT_PAGE:
+  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+    return R_GOT_PAGE_PC;
+  case R_AARCH64_NONE:
+    return R_NONE;
+  }
+}
+
+RelExpr AArch64::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
+                                 RelExpr Expr) const {
+  if (Expr == R_RELAX_TLS_GD_TO_IE) {
+    if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
+      return R_RELAX_TLS_GD_TO_IE_PAGE_PC;
+    return R_RELAX_TLS_GD_TO_IE_ABS;
+  }
+  return Expr;
+}
+
+bool AArch64::usesOnlyLowPageBits(uint32_t Type) const {
+  switch (Type) {
+  default:
+    return false;
+  case R_AARCH64_ADD_ABS_LO12_NC:
+  case R_AARCH64_LD64_GOT_LO12_NC:
+  case R_AARCH64_LDST128_ABS_LO12_NC:
+  case R_AARCH64_LDST16_ABS_LO12_NC:
+  case R_AARCH64_LDST32_ABS_LO12_NC:
+  case R_AARCH64_LDST64_ABS_LO12_NC:
+  case R_AARCH64_LDST8_ABS_LO12_NC:
+  case R_AARCH64_TLSDESC_ADD_LO12:
+  case R_AARCH64_TLSDESC_LD64_LO12:
+  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+    return true;
+  }
+}
+
+bool AArch64::isPicRel(uint32_t Type) const {
+  return Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64;
+}
+
+void AArch64::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
+  write64le(Buf, InX::Plt->getVA());
+}
+
+void AArch64::writePltHeader(uint8_t *Buf) const {
+  const uint8_t PltData[] = {
+      0xf0, 0x7b, 0xbf, 0xa9, // stp    x16, x30, [sp,#-16]!
+      0x10, 0x00, 0x00, 0x90, // adrp   x16, Page(&(.plt.got[2]))
+      0x11, 0x02, 0x40, 0xf9, // ldr    x17, [x16, Offset(&(.plt.got[2]))]
+      0x10, 0x02, 0x00, 0x91, // add    x16, x16, Offset(&(.plt.got[2]))
+      0x20, 0x02, 0x1f, 0xd6, // br     x17
+      0x1f, 0x20, 0x03, 0xd5, // nop
+      0x1f, 0x20, 0x03, 0xd5, // nop
+      0x1f, 0x20, 0x03, 0xd5  // nop
+  };
+  memcpy(Buf, PltData, sizeof(PltData));
+
+  uint64_t Got = InX::GotPlt->getVA();
+  uint64_t Plt = InX::Plt->getVA();
+  relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
+              getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
+  relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
+  relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
+}
+
+void AArch64::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
+                       uint64_t PltEntryAddr, int32_t Index,
+                       unsigned RelOff) const {
+  const uint8_t Inst[] = {
+      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
+      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
+      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
+      0x20, 0x02, 0x1f, 0xd6  // br   x17
+  };
+  memcpy(Buf, Inst, sizeof(Inst));
+
+  relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
+              getAArch64Page(GotPltEntryAddr) - getAArch64Page(PltEntryAddr));
+  relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotPltEntryAddr);
+  relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotPltEntryAddr);
+}
+
+static void write32AArch64Addr(uint8_t *L, uint64_t Imm) {
+  uint32_t ImmLo = (Imm & 0x3) << 29;
+  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
+}
+
+// Return the bits [Start, End] from Val, shifted right by Start bits.
+// For instance, getBits(0xF0, 4, 8) returns 0xF.
+static uint64_t getBits(uint64_t Val, int Start, int End) {
+  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
+  return (Val >> Start) & Mask;
+}
+
+static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+// Update the immediate field in an AArch64 ldr, str, or add instruction.
+static void or32AArch64Imm(uint8_t *L, uint64_t Imm) {
+  or32le(L, (Imm & 0xFFF) << 10);
+}
+
+void AArch64::relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  switch (Type) {
+  case R_AARCH64_ABS16:
+  case R_AARCH64_PREL16:
+    checkIntUInt<16>(Loc, Val, Type);
+    write16le(Loc, Val);
+    break;
+  case R_AARCH64_ABS32:
+  case R_AARCH64_PREL32:
+    checkIntUInt<32>(Loc, Val, Type);
+    write32le(Loc, Val);
+    break;
+  case R_AARCH64_ABS64:
+  case R_AARCH64_GLOB_DAT:
+  case R_AARCH64_PREL64:
+    write64le(Loc, Val);
+    break;
+  case R_AARCH64_ADD_ABS_LO12_NC:
+    or32AArch64Imm(Loc, Val);
+    break;
+  case R_AARCH64_ADR_GOT_PAGE:
+  case R_AARCH64_ADR_PREL_PG_HI21:
+  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
+  case R_AARCH64_TLSDESC_ADR_PAGE21:
+    checkInt<33>(Loc, Val, Type);
+    write32AArch64Addr(Loc, Val >> 12);
+    break;
+  case R_AARCH64_ADR_PREL_LO21:
+    checkInt<21>(Loc, Val, Type);
+    write32AArch64Addr(Loc, Val);
+    break;
+  case R_AARCH64_CALL26:
+  case R_AARCH64_JUMP26:
+    checkInt<28>(Loc, Val, Type);
+    or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
+    break;
+  case R_AARCH64_CONDBR19:
+    checkInt<21>(Loc, Val, Type);
+    or32le(Loc, (Val & 0x1FFFFC) << 3);
+    break;
+  case R_AARCH64_LD64_GOT_LO12_NC:
+  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
+  case R_AARCH64_TLSDESC_LD64_LO12:
+    checkAlignment<8>(Loc, Val, Type);
+    or32le(Loc, (Val & 0xFF8) << 7);
+    break;
+  case R_AARCH64_LDST8_ABS_LO12_NC:
+    or32AArch64Imm(Loc, getBits(Val, 0, 11));
+    break;
+  case R_AARCH64_LDST16_ABS_LO12_NC:
+    or32AArch64Imm(Loc, getBits(Val, 1, 11));
+    break;
+  case R_AARCH64_LDST32_ABS_LO12_NC:
+    or32AArch64Imm(Loc, getBits(Val, 2, 11));
+    break;
+  case R_AARCH64_LDST64_ABS_LO12_NC:
+    or32AArch64Imm(Loc, getBits(Val, 3, 11));
+    break;
+  case R_AARCH64_LDST128_ABS_LO12_NC:
+    or32AArch64Imm(Loc, getBits(Val, 4, 11));
+    break;
+  case R_AARCH64_MOVW_UABS_G0_NC:
+    or32le(Loc, (Val & 0xFFFF) << 5);
+    break;
+  case R_AARCH64_MOVW_UABS_G1_NC:
+    or32le(Loc, (Val & 0xFFFF0000) >> 11);
+    break;
+  case R_AARCH64_MOVW_UABS_G2_NC:
+    or32le(Loc, (Val & 0xFFFF00000000) >> 27);
+    break;
+  case R_AARCH64_MOVW_UABS_G3:
+    or32le(Loc, (Val & 0xFFFF000000000000) >> 43);
+    break;
+  case R_AARCH64_TSTBR14:
+    checkInt<16>(Loc, Val, Type);
+    or32le(Loc, (Val & 0xFFFC) << 3);
+    break;
+  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
+    checkInt<24>(Loc, Val, Type);
+    or32AArch64Imm(Loc, Val >> 12);
+    break;
+  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+  case R_AARCH64_TLSDESC_ADD_LO12:
+    or32AArch64Imm(Loc, Val);
+    break;
+  default:
+    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+  }
+}
+
+void AArch64::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  // TLSDESC Global-Dynamic relocations are of the form:
+  //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
+  //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12]
+  //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12]
+  //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
+  //   blr     x1
+  // And they can be optimized to:
+  //   movz    x0, #0x0, lsl #16
+  //   movk    x0, #0x10
+  //   nop
+  //   nop
+  checkUInt<32>(Loc, Val, Type);
+
+  switch (Type) {
+  case R_AARCH64_TLSDESC_ADD_LO12:
+  case R_AARCH64_TLSDESC_CALL:
+    write32le(Loc, 0xd503201f); // nop
+    return;
+  case R_AARCH64_TLSDESC_ADR_PAGE21:
+    write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
+    return;
+  case R_AARCH64_TLSDESC_LD64_LO12:
+    write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk
+    return;
+  default:
+    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
+  }
+}
+
+void AArch64::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  // TLSDESC Global-Dynamic relocations are of the form:
+  //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
+  //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12]
+  //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12]
+  //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
+  //   blr     x1
+  // And they can be optimized to:
+  //   adrp    x0, :gottprel:v
+  //   ldr     x0, [x0, :gottprel_lo12:v]
+  //   nop
+  //   nop
+
+  switch (Type) {
+  case R_AARCH64_TLSDESC_ADD_LO12:
+  case R_AARCH64_TLSDESC_CALL:
+    write32le(Loc, 0xd503201f); // nop
+    break;
+  case R_AARCH64_TLSDESC_ADR_PAGE21:
+    write32le(Loc, 0x90000000); // adrp
+    relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val);
+    break;
+  case R_AARCH64_TLSDESC_LD64_LO12:
+    write32le(Loc, 0xf9400000); // ldr
+    relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val);
+    break;
+  default:
+    llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
+  }
+}
+
+void AArch64::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  checkUInt<32>(Loc, Val, Type);
+
+  if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
+    // Generate MOVZ.
+    uint32_t RegNo = read32le(Loc) & 0x1f;
+    write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5));
+    return;
+  }
+  if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
+    // Generate MOVK.
+    uint32_t RegNo = read32le(Loc) & 0x1f;
+    write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
+    return;
+  }
+  llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
+}
+
+TargetInfo *elf::createAArch64TargetInfo() { return make<AArch64>(); }
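
As a quick illustration of the ADRP handling above (R_AARCH64_ADR_PREL_PG_HI21),
the linker computes Page(S+A) - Page(P) and packs bits [12:32] of that delta into
the instruction's immlo/immhi fields via write32AArch64Addr(). A minimal standalone
sketch of that math, with hypothetical addresses and plain C++ rather than LLD's
read/write helpers:

    // Standalone sketch of the ADRP page-delta math; not LLD code.
    #include <cstdint>
    #include <cstdio>

    static uint64_t page(uint64_t V) { return V & ~uint64_t(0xFFF); }

    // Pack Imm's low 21 bits into ADRP's immlo (bits 29-30) and immhi
    // (bits 5-23), mirroring write32AArch64Addr().
    static uint32_t patchAdrp(uint32_t Insn, uint64_t Imm) {
      uint32_t ImmLo = (Imm & 0x3) << 29;
      uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
      uint32_t Mask = (0x3u << 29) | (0x1FFFFCu << 3);
      return (Insn & ~Mask) | ImmLo | ImmHi;
    }

    int main() {
      uint64_t P = 0x400010; // hypothetical place of the adrp
      uint64_t S = 0x41FF34; // hypothetical target address
      uint64_t Val = page(S) - page(P);                 // R_AARCH64_ADR_PREL_PG_HI21
      uint32_t Insn = patchAdrp(0x90000000, Val >> 12); // adrp x0, <page>
      printf("page delta %#llx -> insn %#x\n", (unsigned long long)Val, Insn);
    }

With these example values the page delta is 0x1F000, so Val >> 12 is 0x1F, whose
low two bits go to immlo and the rest to immhi.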

Added: lld/trunk/ELF/Arch/AMDGPU.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/AMDGPU.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/AMDGPU.cpp (added)
+++ lld/trunk/ELF/Arch/AMDGPU.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,82 @@
+//===- AMDGPU.cpp ---------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "InputFiles.h"
+#include "Memory.h"
+#include "Symbols.h"
+#include "Target.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+namespace {
+class AMDGPU final : public TargetInfo {
+public:
+  AMDGPU();
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+};
+} // namespace
+
+AMDGPU::AMDGPU() {
+  RelativeRel = R_AMDGPU_REL64;
+  GotRel = R_AMDGPU_ABS64;
+  GotEntrySize = 8;
+}
+
+void AMDGPU::relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  switch (Type) {
+  case R_AMDGPU_ABS32:
+  case R_AMDGPU_GOTPCREL:
+  case R_AMDGPU_GOTPCREL32_LO:
+  case R_AMDGPU_REL32:
+  case R_AMDGPU_REL32_LO:
+    write32le(Loc, Val);
+    break;
+  case R_AMDGPU_ABS64:
+    write64le(Loc, Val);
+    break;
+  case R_AMDGPU_GOTPCREL32_HI:
+  case R_AMDGPU_REL32_HI:
+    write32le(Loc, Val >> 32);
+    break;
+  default:
+    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+  }
+}
+
+RelExpr AMDGPU::getRelExpr(uint32_t Type, const SymbolBody &S,
+                           const uint8_t *Loc) const {
+  switch (Type) {
+  case R_AMDGPU_ABS32:
+  case R_AMDGPU_ABS64:
+    return R_ABS;
+  case R_AMDGPU_REL32:
+  case R_AMDGPU_REL32_LO:
+  case R_AMDGPU_REL32_HI:
+    return R_PC;
+  case R_AMDGPU_GOTPCREL:
+  case R_AMDGPU_GOTPCREL32_LO:
+  case R_AMDGPU_GOTPCREL32_HI:
+    return R_GOT_PC;
+  default:
+    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
+    return R_HINT;
+  }
+}
+
+TargetInfo *elf::createAMDGPUTargetInfo() { return make<AMDGPU>(); }

Added: lld/trunk/ELF/Arch/ARM.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/ARM.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/ARM.cpp (added)
+++ lld/trunk/ELF/Arch/ARM.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,432 @@
+//===- ARM.cpp ------------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "InputFiles.h"
+#include "Memory.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+#include "Thunks.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+namespace {
+class ARM final : public TargetInfo {
+public:
+  ARM();
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+  bool isPicRel(uint32_t Type) const override;
+  uint32_t getDynRel(uint32_t Type) const override;
+  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
+  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
+  void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
+  void writePltHeader(uint8_t *Buf) const override;
+  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
+                int32_t Index, unsigned RelOff) const override;
+  void addPltSymbols(InputSectionBase *IS, uint64_t Off) const override;
+  void addPltHeaderSymbols(InputSectionBase *ISD) const override;
+  bool needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
+                  const SymbolBody &S) const override;
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+};
+} // namespace
+
+ARM::ARM() {
+  CopyRel = R_ARM_COPY;
+  RelativeRel = R_ARM_RELATIVE;
+  IRelativeRel = R_ARM_IRELATIVE;
+  GotRel = R_ARM_GLOB_DAT;
+  PltRel = R_ARM_JUMP_SLOT;
+  TlsGotRel = R_ARM_TLS_TPOFF32;
+  TlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
+  TlsOffsetRel = R_ARM_TLS_DTPOFF32;
+  GotEntrySize = 4;
+  GotPltEntrySize = 4;
+  PltEntrySize = 16;
+  PltHeaderSize = 20;
+  // ARM uses Variant 1 TLS
+  TcbSize = 8;
+  NeedsThunks = true;
+}
+
+RelExpr ARM::getRelExpr(uint32_t Type, const SymbolBody &S,
+                        const uint8_t *Loc) const {
+  switch (Type) {
+  default:
+    return R_ABS;
+  case R_ARM_THM_JUMP11:
+    return R_PC;
+  case R_ARM_CALL:
+  case R_ARM_JUMP24:
+  case R_ARM_PC24:
+  case R_ARM_PLT32:
+  case R_ARM_PREL31:
+  case R_ARM_THM_JUMP19:
+  case R_ARM_THM_JUMP24:
+  case R_ARM_THM_CALL:
+    return R_PLT_PC;
+  case R_ARM_GOTOFF32:
+    // (S + A) - GOT_ORG
+    return R_GOTREL;
+  case R_ARM_GOT_BREL:
+    // GOT(S) + A - GOT_ORG
+    return R_GOT_OFF;
+  case R_ARM_GOT_PREL:
+  case R_ARM_TLS_IE32:
+    // GOT(S) + A - P
+    return R_GOT_PC;
+  case R_ARM_SBREL32:
+    return R_ARM_SBREL;
+  case R_ARM_TARGET1:
+    return Config->Target1Rel ? R_PC : R_ABS;
+  case R_ARM_TARGET2:
+    if (Config->Target2 == Target2Policy::Rel)
+      return R_PC;
+    if (Config->Target2 == Target2Policy::Abs)
+      return R_ABS;
+    return R_GOT_PC;
+  case R_ARM_TLS_GD32:
+    return R_TLSGD_PC;
+  case R_ARM_TLS_LDM32:
+    return R_TLSLD_PC;
+  case R_ARM_BASE_PREL:
+    // B(S) + A - P
+    // FIXME: currently B(S) is assumed to be .got; this may not hold for
+    // all platforms.
+    return R_GOTONLY_PC;
+  case R_ARM_MOVW_PREL_NC:
+  case R_ARM_MOVT_PREL:
+  case R_ARM_REL32:
+  case R_ARM_THM_MOVW_PREL_NC:
+  case R_ARM_THM_MOVT_PREL:
+    return R_PC;
+  case R_ARM_NONE:
+    return R_NONE;
+  case R_ARM_TLS_LE32:
+    return R_TLS;
+  }
+}
+
+bool ARM::isPicRel(uint32_t Type) const {
+  return (Type == R_ARM_TARGET1 && !Config->Target1Rel) ||
+         (Type == R_ARM_ABS32);
+}
+
+uint32_t ARM::getDynRel(uint32_t Type) const {
+  if (Type == R_ARM_TARGET1 && !Config->Target1Rel)
+    return R_ARM_ABS32;
+  if (Type == R_ARM_ABS32)
+    return Type;
+  // Keep it going with a dummy value so that we can find more reloc errors.
+  return R_ARM_ABS32;
+}
+
+void ARM::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
+  write32le(Buf, InX::Plt->getVA());
+}
+
+void ARM::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
+  // An ARM entry is the address of the ifunc resolver function.
+  write32le(Buf, S.getVA());
+}
+
+void ARM::writePltHeader(uint8_t *Buf) const {
+  const uint8_t PltData[] = {
+      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
+      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
+      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
+      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
+      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
+  };
+  memcpy(Buf, PltData, sizeof(PltData));
+  uint64_t GotPlt = InX::GotPlt->getVA();
+  uint64_t L1 = InX::Plt->getVA() + 8;
+  write32le(Buf + 16, GotPlt - L1 - 8);
+}
+
+void ARM::addPltHeaderSymbols(InputSectionBase *ISD) const {
+  auto *IS = cast<InputSection>(ISD);
+  addSyntheticLocal("$a", STT_NOTYPE, 0, 0, IS);
+  addSyntheticLocal("$d", STT_NOTYPE, 16, 0, IS);
+}
+
+void ARM::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
+                   uint64_t PltEntryAddr, int32_t Index,
+                   unsigned RelOff) const {
+  // FIXME: This uses a simple code sequence with simple relocations.
+  // There is a more optimal sequence, but it requires support for the group
+  // relocations. See "ELF for the ARM Architecture", Appendix A.3.
+  const uint8_t PltData[] = {
+      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
+      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
+      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
+      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt entry) - L1 - 8
+  };
+  memcpy(Buf, PltData, sizeof(PltData));
+  uint64_t L1 = PltEntryAddr + 4;
+  write32le(Buf + 12, GotPltEntryAddr - L1 - 8);
+}
+
+void ARM::addPltSymbols(InputSectionBase *ISD, uint64_t Off) const {
+  auto *IS = cast<InputSection>(ISD);
+  addSyntheticLocal("$a", STT_NOTYPE, Off, 0, IS);
+  addSyntheticLocal("$d", STT_NOTYPE, Off + 12, 0, IS);
+}
+
+bool ARM::needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
+                     const SymbolBody &S) const {
+  // If S is an undefined weak symbol in an executable, we don't need a thunk.
+  // In a DSO, calls to undefined symbols, including weak ones, get PLT
+  // entries which may need a thunk.
+  if (S.isUndefined() && !S.isLocal() && S.symbol()->isWeak() &&
+      !Config->Shared)
+    return false;
+  // A state change from ARM to Thumb and vice versa must go through an
+  // interworking thunk if the relocation type is not R_ARM_CALL or
+  // R_ARM_THM_CALL.
+  switch (RelocType) {
+  case R_ARM_PC24:
+  case R_ARM_PLT32:
+  case R_ARM_JUMP24:
+    // The source is ARM and all PLT entries are ARM, so no interworking is
+    // required for a call via the PLT. Otherwise we need to interwork if the
+    // symbol has bit 0 set (Thumb).
+    if (Expr == R_PC && ((S.getVA() & 1) == 1))
+      return true;
+    break;
+  case R_ARM_THM_JUMP19:
+  case R_ARM_THM_JUMP24:
+    // The source is Thumb and all PLT entries are ARM, so interworking is
+    // required for a call via the PLT. Otherwise we need to interwork if the
+    // symbol has bit 0 clear (ARM).
+    if (Expr == R_PLT_PC || ((S.getVA() & 1) == 0))
+      return true;
+    break;
+  }
+  return false;
+}
+
+void ARM::relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  switch (Type) {
+  case R_ARM_ABS32:
+  case R_ARM_BASE_PREL:
+  case R_ARM_GLOB_DAT:
+  case R_ARM_GOTOFF32:
+  case R_ARM_GOT_BREL:
+  case R_ARM_GOT_PREL:
+  case R_ARM_REL32:
+  case R_ARM_RELATIVE:
+  case R_ARM_SBREL32:
+  case R_ARM_TARGET1:
+  case R_ARM_TARGET2:
+  case R_ARM_TLS_GD32:
+  case R_ARM_TLS_IE32:
+  case R_ARM_TLS_LDM32:
+  case R_ARM_TLS_LDO32:
+  case R_ARM_TLS_LE32:
+  case R_ARM_TLS_TPOFF32:
+  case R_ARM_TLS_DTPOFF32:
+    write32le(Loc, Val);
+    break;
+  case R_ARM_TLS_DTPMOD32:
+    write32le(Loc, 1);
+    break;
+  case R_ARM_PREL31:
+    checkInt<31>(Loc, Val, Type);
+    write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000));
+    break;
+  case R_ARM_CALL:
+    // R_ARM_CALL is used for BL and BLX instructions; depending on the
+    // value of bit 0 of Val, we must select either a BL or a BLX instruction.
+    if (Val & 1) {
+      // If bit 0 of Val is 1, the target is Thumb and we must select a BLX.
+      // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
+      checkInt<26>(Loc, Val, Type);
+      write32le(Loc, 0xfa000000 |                    // opcode
+                         ((Val & 2) << 23) |         // H
+                         ((Val >> 2) & 0x00ffffff)); // imm24
+      break;
+    }
+    if ((read32le(Loc) & 0xfe000000) == 0xfa000000)
+      // This is a BLX (always unconditional) instruction to an ARM target;
+      // select an unconditional BL.
+      write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff));
+    // fall through as BL encoding is shared with B
+    LLVM_FALLTHROUGH;
+  case R_ARM_JUMP24:
+  case R_ARM_PC24:
+  case R_ARM_PLT32:
+    checkInt<26>(Loc, Val, Type);
+    write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff));
+    break;
+  case R_ARM_THM_JUMP11:
+    checkInt<12>(Loc, Val, Type);
+    write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff));
+    break;
+  case R_ARM_THM_JUMP19:
+    // Encoding T3: Val = S:J2:J1:imm6:imm11:0
+    checkInt<21>(Loc, Val, Type);
+    write16le(Loc,
+              (read16le(Loc) & 0xfbc0) |   // opcode cond
+                  ((Val >> 10) & 0x0400) | // S
+                  ((Val >> 12) & 0x003f)); // imm6
+    write16le(Loc + 2,
+              0x8000 |                    // opcode
+                  ((Val >> 8) & 0x0800) | // J2
+                  ((Val >> 5) & 0x2000) | // J1
+                  ((Val >> 1) & 0x07ff)); // imm11
+    break;
+  case R_ARM_THM_CALL:
+    // R_ARM_THM_CALL is used for BL and BLX instructions; depending on the
+    // value of bit 0 of Val, we must select either a BL or a BLX instruction.
+    if ((Val & 1) == 0) {
+      // Ensure the BLX destination is 4-byte aligned, as the BLX instruction
+      // may only be 2-byte aligned. This must be done before the overflow
+      // check.
+      Val = alignTo(Val, 4);
+    }
+    // Bit 12 is 0 for BLX, 1 for BL
+    write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12);
+    // Fall through as rest of encoding is the same as B.W
+    LLVM_FALLTHROUGH;
+  case R_ARM_THM_JUMP24:
+    // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
+    // FIXME: Use of I1 and I2 require v6T2ops
+    checkInt<25>(Loc, Val, Type);
+    write16le(Loc,
+              0xf000 |                     // opcode
+                  ((Val >> 14) & 0x0400) | // S
+                  ((Val >> 12) & 0x03ff)); // imm10
+    write16le(Loc + 2,
+              (read16le(Loc + 2) & 0xd000) |                  // opcode
+                  (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1
+                  (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2
+                  ((Val >> 1) & 0x07ff));                     // imm11
+    break;
+  case R_ARM_MOVW_ABS_NC:
+  case R_ARM_MOVW_PREL_NC:
+    write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) |
+                       (Val & 0x0fff));
+    break;
+  case R_ARM_MOVT_ABS:
+  case R_ARM_MOVT_PREL:
+    checkInt<32>(Loc, Val, Type);
+    write32le(Loc, (read32le(Loc) & ~0x000f0fff) |
+                       (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff));
+    break;
+  case R_ARM_THM_MOVT_ABS:
+  case R_ARM_THM_MOVT_PREL:
+    // Encoding T1: A = imm4:i:imm3:imm8
+    checkInt<32>(Loc, Val, Type);
+    write16le(Loc,
+              0xf2c0 |                     // opcode
+                  ((Val >> 17) & 0x0400) | // i
+                  ((Val >> 28) & 0x000f)); // imm4
+    write16le(Loc + 2,
+              (read16le(Loc + 2) & 0x8f00) | // opcode
+                  ((Val >> 12) & 0x7000) |   // imm3
+                  ((Val >> 16) & 0x00ff));   // imm8
+    break;
+  case R_ARM_THM_MOVW_ABS_NC:
+  case R_ARM_THM_MOVW_PREL_NC:
+    // Encoding T3: A = imm4:i:imm3:imm8
+    write16le(Loc,
+              0xf240 |                     // opcode
+                  ((Val >> 1) & 0x0400) |  // i
+                  ((Val >> 12) & 0x000f)); // imm4
+    write16le(Loc + 2,
+              (read16le(Loc + 2) & 0x8f00) | // opcode
+                  ((Val << 4) & 0x7000) |    // imm3
+                  (Val & 0x00ff));           // imm8
+    break;
+  default:
+    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+  }
+}
+
+int64_t ARM::getImplicitAddend(const uint8_t *Buf, uint32_t Type) const {
+  switch (Type) {
+  default:
+    return 0;
+  case R_ARM_ABS32:
+  case R_ARM_BASE_PREL:
+  case R_ARM_GOTOFF32:
+  case R_ARM_GOT_BREL:
+  case R_ARM_GOT_PREL:
+  case R_ARM_REL32:
+  case R_ARM_TARGET1:
+  case R_ARM_TARGET2:
+  case R_ARM_TLS_GD32:
+  case R_ARM_TLS_LDM32:
+  case R_ARM_TLS_LDO32:
+  case R_ARM_TLS_IE32:
+  case R_ARM_TLS_LE32:
+    return SignExtend64<32>(read32le(Buf));
+  case R_ARM_PREL31:
+    return SignExtend64<31>(read32le(Buf));
+  case R_ARM_CALL:
+  case R_ARM_JUMP24:
+  case R_ARM_PC24:
+  case R_ARM_PLT32:
+    return SignExtend64<26>(read32le(Buf) << 2);
+  case R_ARM_THM_JUMP11:
+    return SignExtend64<12>(read16le(Buf) << 1);
+  case R_ARM_THM_JUMP19: {
+    // Encoding T3: A = S:J2:J1:imm6:imm11:0
+    uint16_t Hi = read16le(Buf);
+    uint16_t Lo = read16le(Buf + 2);
+    return SignExtend64<20>(((Hi & 0x0400) << 10) | // S
+                            ((Lo & 0x0800) << 8) |  // J2
+                            ((Lo & 0x2000) << 5) |  // J1
+                            ((Hi & 0x003f) << 12) | // imm6
+                            ((Lo & 0x07ff) << 1));  // imm11:0
+  }
+  case R_ARM_THM_CALL:
+  case R_ARM_THM_JUMP24: {
+    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
+    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
+    // FIXME: I1 and I2 require v6T2ops
+    uint16_t Hi = read16le(Buf);
+    uint16_t Lo = read16le(Buf + 2);
+    return SignExtend64<24>(((Hi & 0x0400) << 14) |                    // S
+                            (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1
+                            (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2
+                            ((Hi & 0x003ff) << 12) |                   // imm10
+                            ((Lo & 0x007ff) << 1)); // imm11:0
+  }
+  // Per "ELF for the ARM Architecture", 4.6.1.1: the implicit addend for
+  // MOVW and MOVT is in the range -32768 <= A < 32768.
+  case R_ARM_MOVW_ABS_NC:
+  case R_ARM_MOVT_ABS:
+  case R_ARM_MOVW_PREL_NC:
+  case R_ARM_MOVT_PREL: {
+    uint64_t Val = read32le(Buf) & 0x000f0fff;
+    return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff));
+  }
+  case R_ARM_THM_MOVW_ABS_NC:
+  case R_ARM_THM_MOVT_ABS:
+  case R_ARM_THM_MOVW_PREL_NC:
+  case R_ARM_THM_MOVT_PREL: {
+    // Encoding T3: A = imm4:i:imm3:imm8
+    uint16_t Hi = read16le(Buf);
+    uint16_t Lo = read16le(Buf + 2);
+    return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4
+                            ((Hi & 0x0400) << 1) |  // i
+                            ((Lo & 0x7000) >> 4) |  // imm3
+                            (Lo & 0x00ff));         // imm8
+  }
+  }
+}
+
+TargetInfo *elf::createARMTargetInfo() { return make<ARM>(); }
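
The needsThunk() logic above relies on the ARM/ELF convention that bit 0 of a
Thumb function's address is set. A tiny standalone sketch of that decision
(hypothetical values; this is a simplification, not the LLD implementation,
which also has to consider undefined weak symbols):

    // Standalone sketch of the ARM/Thumb interworking test; not LLD code.
    #include <cstdint>
    #include <cstdio>

    static bool isThumb(uint64_t VA) { return VA & 1; } // bit 0 marks Thumb

    // ARM-state branches (R_ARM_JUMP24/PC24/PLT32) cannot change state, so a
    // direct branch to a Thumb destination needs an interworking thunk.
    static bool armBranchNeedsThunk(uint64_t DestVA) { return isThumb(DestVA); }

    // Thumb-state branches (R_ARM_THM_JUMP19/24) need a thunk to reach an ARM
    // destination; a call via the PLT always lands in ARM code.
    static bool thumbBranchNeedsThunk(uint64_t DestVA, bool ViaPlt) {
      return ViaPlt || !isThumb(DestVA);
    }

    int main() {
      printf("%d %d\n",
             armBranchNeedsThunk(0x11001),           // Thumb target: 1
             thumbBranchNeedsThunk(0x11000, false)); // ARM target:   1
    }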

Added: lld/trunk/ELF/Arch/AVR.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/AVR.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/AVR.cpp (added)
+++ lld/trunk/ELF/Arch/AVR.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,59 @@
+//===- AVR.cpp ------------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "InputFiles.h"
+#include "Memory.h"
+#include "Symbols.h"
+#include "Target.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+namespace {
+class AVR final : public TargetInfo {
+public:
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+};
+} // namespace
+
+RelExpr AVR::getRelExpr(uint32_t Type, const SymbolBody &S,
+                        const uint8_t *Loc) const {
+  switch (Type) {
+  case R_AVR_CALL:
+    return R_ABS;
+  default:
+    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
+    return R_HINT;
+  }
+}
+
+void AVR::relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  switch (Type) {
+  case R_AVR_CALL: {
+    uint16_t Hi = Val >> 17;
+    uint16_t Lo = Val >> 1;
+    write16le(Loc, read16le(Loc) | ((Hi >> 1) << 4) | (Hi & 1));
+    write16le(Loc + 2, Lo);
+    break;
+  }
+  default:
+    error(getErrorLocation(Loc) + "unrecognized reloc " + toString(Type));
+  }
+}
+
+TargetInfo *elf::createAVRTargetInfo() { return make<AVR>(); }
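
The R_AVR_CALL handling above converts the byte address Val into the 22-bit word
address of AVR's 32-bit CALL encoding: the low 16 bits of the word address go into
the second instruction word, and the remaining high bits are scattered into the
first word. A small worked sketch with a hypothetical destination, assuming the
first instruction word already holds the CALL opcode 0x940E (plain C++, not LLD
code):

    // Standalone sketch of the R_AVR_CALL bit split; not LLD code.
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t Val = 0x2468;   // hypothetical byte address of the callee
      uint16_t Hi = Val >> 17; // top bits of the 22-bit word address
      uint16_t Lo = Val >> 1;  // low 16 bits of the word address
      uint16_t Word0 = 0x940E | ((Hi >> 1) << 4) | (Hi & 1); // CALL opcode + hi bits
      uint16_t Word1 = Lo;
      printf("%04x %04x\n", (unsigned)Word0, (unsigned)Word1); // 940e 1234
    }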

Added: lld/trunk/ELF/Arch/Mips.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/Mips.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/Mips.cpp (added)
+++ lld/trunk/ELF/Arch/Mips.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,422 @@
+//===- MIPS.cpp -----------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "InputFiles.h"
+#include "Memory.h"
+#include "OutputSections.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+#include "Thunks.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+namespace {
+template <class ELFT> class MIPS final : public TargetInfo {
+public:
+  MIPS();
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
+  bool isPicRel(uint32_t Type) const override;
+  uint32_t getDynRel(uint32_t Type) const override;
+  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
+  void writePltHeader(uint8_t *Buf) const override;
+  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
+                int32_t Index, unsigned RelOff) const override;
+  bool needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
+                  const SymbolBody &S) const override;
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  bool usesOnlyLowPageBits(uint32_t Type) const override;
+};
+} // namespace
+
+template <class ELFT> MIPS<ELFT>::MIPS() {
+  GotPltHeaderEntriesNum = 2;
+  DefaultMaxPageSize = 65536;
+  GotEntrySize = sizeof(typename ELFT::uint);
+  GotPltEntrySize = sizeof(typename ELFT::uint);
+  PltEntrySize = 16;
+  PltHeaderSize = 32;
+  CopyRel = R_MIPS_COPY;
+  PltRel = R_MIPS_JUMP_SLOT;
+  NeedsThunks = true;
+
+  if (ELFT::Is64Bits) {
+    RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
+    TlsGotRel = R_MIPS_TLS_TPREL64;
+    TlsModuleIndexRel = R_MIPS_TLS_DTPMOD64;
+    TlsOffsetRel = R_MIPS_TLS_DTPREL64;
+  } else {
+    RelativeRel = R_MIPS_REL32;
+    TlsGotRel = R_MIPS_TLS_TPREL32;
+    TlsModuleIndexRel = R_MIPS_TLS_DTPMOD32;
+    TlsOffsetRel = R_MIPS_TLS_DTPREL32;
+  }
+}
+
+template <class ELFT>
+RelExpr MIPS<ELFT>::getRelExpr(uint32_t Type, const SymbolBody &S,
+                               const uint8_t *Loc) const {
+  // See the comment in calculateMipsRelChain.
+  if (ELFT::Is64Bits || Config->MipsN32Abi)
+    Type &= 0xff;
+  switch (Type) {
+  default:
+    return R_ABS;
+  case R_MIPS_JALR:
+    return R_HINT;
+  case R_MIPS_GPREL16:
+  case R_MIPS_GPREL32:
+    return R_MIPS_GOTREL;
+  case R_MIPS_26:
+    return R_PLT;
+  case R_MIPS_HI16:
+  case R_MIPS_LO16:
+    // R_MIPS_HI16/R_MIPS_LO16 relocations against _gp_disp calculate the
+    // offset between the start of a function and the 'gp' value, which by
+    // default is equal to the start of the .got section. In that case we
+    // consider these relocations as relative.
+    if (&S == ElfSym::MipsGpDisp)
+      return R_MIPS_GOT_GP_PC;
+    if (&S == ElfSym::MipsLocalGp)
+      return R_MIPS_GOT_GP;
+    LLVM_FALLTHROUGH;
+  case R_MIPS_GOT_OFST:
+    return R_ABS;
+  case R_MIPS_PC32:
+  case R_MIPS_PC16:
+  case R_MIPS_PC19_S2:
+  case R_MIPS_PC21_S2:
+  case R_MIPS_PC26_S2:
+  case R_MIPS_PCHI16:
+  case R_MIPS_PCLO16:
+    return R_PC;
+  case R_MIPS_GOT16:
+    if (S.isLocal())
+      return R_MIPS_GOT_LOCAL_PAGE;
+    LLVM_FALLTHROUGH;
+  case R_MIPS_CALL16:
+  case R_MIPS_GOT_DISP:
+  case R_MIPS_TLS_GOTTPREL:
+    return R_MIPS_GOT_OFF;
+  case R_MIPS_CALL_HI16:
+  case R_MIPS_CALL_LO16:
+  case R_MIPS_GOT_HI16:
+  case R_MIPS_GOT_LO16:
+    return R_MIPS_GOT_OFF32;
+  case R_MIPS_GOT_PAGE:
+    return R_MIPS_GOT_LOCAL_PAGE;
+  case R_MIPS_TLS_GD:
+    return R_MIPS_TLSGD;
+  case R_MIPS_TLS_LDM:
+    return R_MIPS_TLSLD;
+  }
+}
+
+template <class ELFT> bool MIPS<ELFT>::isPicRel(uint32_t Type) const {
+  return Type == R_MIPS_32 || Type == R_MIPS_64;
+}
+
+template <class ELFT> uint32_t MIPS<ELFT>::getDynRel(uint32_t Type) const {
+  return RelativeRel;
+}
+
+template <class ELFT>
+void MIPS<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
+  write32<ELFT::TargetEndianness>(Buf, InX::Plt->getVA());
+}
+
+template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
+static int64_t getPcRelocAddend(const uint8_t *Loc) {
+  uint32_t Instr = read32<E>(Loc);
+  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
+  return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
+}
+
+template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
+static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
+  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
+  uint32_t Instr = read32<E>(Loc);
+  if (SHIFT > 0)
+    checkAlignment<(1 << SHIFT)>(Loc, V, Type);
+  checkInt<BSIZE + SHIFT>(Loc, V, Type);
+  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
+}
+
+template <endianness E> static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
+  uint32_t Instr = read32<E>(Loc);
+  uint16_t Res = ((V + 0x8000) >> 16) & 0xffff;
+  write32<E>(Loc, (Instr & 0xffff0000) | Res);
+}
+
+template <endianness E> static void writeMipsHigher(uint8_t *Loc, uint64_t V) {
+  uint32_t Instr = read32<E>(Loc);
+  uint16_t Res = ((V + 0x80008000) >> 32) & 0xffff;
+  write32<E>(Loc, (Instr & 0xffff0000) | Res);
+}
+
+template <endianness E> static void writeMipsHighest(uint8_t *Loc, uint64_t V) {
+  uint32_t Instr = read32<E>(Loc);
+  uint16_t Res = ((V + 0x800080008000) >> 48) & 0xffff;
+  write32<E>(Loc, (Instr & 0xffff0000) | Res);
+}
+
+template <endianness E> static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
+  uint32_t Instr = read32<E>(Loc);
+  write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
+}
+
+template <class ELFT> static bool isMipsR6() {
+  const auto &FirstObj = cast<ELFFileBase<ELFT>>(*Config->FirstElf);
+  uint32_t Arch = FirstObj.getObj().getHeader()->e_flags & EF_MIPS_ARCH;
+  return Arch == EF_MIPS_ARCH_32R6 || Arch == EF_MIPS_ARCH_64R6;
+}
+
+template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *Buf) const {
+  const endianness E = ELFT::TargetEndianness;
+  if (Config->MipsN32Abi) {
+    write32<E>(Buf, 0x3c0e0000);      // lui   $14, %hi(&GOTPLT[0])
+    write32<E>(Buf + 4, 0x8dd90000);  // lw    $25, %lo(&GOTPLT[0])($14)
+    write32<E>(Buf + 8, 0x25ce0000);  // addiu $14, $14, %lo(&GOTPLT[0])
+    write32<E>(Buf + 12, 0x030ec023); // subu  $24, $24, $14
+  } else {
+    write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
+    write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
+    write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
+    write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
+  }
+
+  write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
+  write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
+  write32<E>(Buf + 24, 0x0320f809); // jalr  $25
+  write32<E>(Buf + 28, 0x2718fffe); // subu  $24, $24, 2
+
+  uint64_t GotPlt = InX::GotPlt->getVA();
+  writeMipsHi16<E>(Buf, GotPlt);
+  writeMipsLo16<E>(Buf + 4, GotPlt);
+  writeMipsLo16<E>(Buf + 8, GotPlt);
+}
+
+template <class ELFT>
+void MIPS<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
+                          uint64_t PltEntryAddr, int32_t Index,
+                          unsigned RelOff) const {
+  const endianness E = ELFT::TargetEndianness;
+  write32<E>(Buf, 0x3c0f0000);     // lui   $15, %hi(.got.plt entry)
+  write32<E>(Buf + 4, 0x8df90000); // l[wd] $25, %lo(.got.plt entry)($15)
+                                   // jr    $25
+  write32<E>(Buf + 8, isMipsR6<ELFT>() ? 0x03200009 : 0x03200008);
+  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
+  writeMipsHi16<E>(Buf, GotPltEntryAddr);
+  writeMipsLo16<E>(Buf + 4, GotPltEntryAddr);
+  writeMipsLo16<E>(Buf + 12, GotPltEntryAddr);
+}
+
+template <class ELFT>
+bool MIPS<ELFT>::needsThunk(RelExpr Expr, uint32_t Type, const InputFile *File,
+                            const SymbolBody &S) const {
+  // Any MIPS PIC code function is invoked with its address in register $t9.
+  // So if we have a branch instruction from non-PIC code to PIC code, we
+  // cannot make the jump directly and need to create a small stub to set up
+  // the target function address.
+  // See page 3-38, ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
+  if (Type != R_MIPS_26)
+    return false;
+  auto *F = dyn_cast_or_null<ELFFileBase<ELFT>>(File);
+  if (!F)
+    return false;
+  // If the current file has PIC code, an LA25 stub is not required.
+  if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
+    return false;
+  auto *D = dyn_cast<DefinedRegular>(&S);
+  // An LA25 stub is required if the target file has PIC code
+  // or the target symbol is a PIC symbol.
+  return D && D->isMipsPIC<ELFT>();
+}
+
+template <class ELFT>
+int64_t MIPS<ELFT>::getImplicitAddend(const uint8_t *Buf, uint32_t Type) const {
+  const endianness E = ELFT::TargetEndianness;
+  switch (Type) {
+  default:
+    return 0;
+  case R_MIPS_32:
+  case R_MIPS_GPREL32:
+  case R_MIPS_TLS_DTPREL32:
+  case R_MIPS_TLS_TPREL32:
+    return SignExtend64<32>(read32<E>(Buf));
+  case R_MIPS_26:
+    // FIXME (simon): If the relocation target symbol is not a PLT entry
+    // we should use another expression for calculation:
+    // ((A << 2) | (P & 0xf0000000)) >> 2
+    return SignExtend64<28>((read32<E>(Buf) & 0x3ffffff) << 2);
+  case R_MIPS_GPREL16:
+  case R_MIPS_LO16:
+  case R_MIPS_PCLO16:
+  case R_MIPS_TLS_DTPREL_HI16:
+  case R_MIPS_TLS_DTPREL_LO16:
+  case R_MIPS_TLS_TPREL_HI16:
+  case R_MIPS_TLS_TPREL_LO16:
+    return SignExtend64<16>(read32<E>(Buf));
+  case R_MIPS_PC16:
+    return getPcRelocAddend<E, 16, 2>(Buf);
+  case R_MIPS_PC19_S2:
+    return getPcRelocAddend<E, 19, 2>(Buf);
+  case R_MIPS_PC21_S2:
+    return getPcRelocAddend<E, 21, 2>(Buf);
+  case R_MIPS_PC26_S2:
+    return getPcRelocAddend<E, 26, 2>(Buf);
+  case R_MIPS_PC32:
+    return getPcRelocAddend<E, 32, 0>(Buf);
+  }
+}
+
+static std::pair<uint32_t, uint64_t>
+calculateMipsRelChain(uint8_t *Loc, uint32_t Type, uint64_t Val) {
+  // The MIPS N64 ABI packs multiple relocations into a single relocation
+  // record. In general, up to three relocations with arbitrary types can be
+  // packed. In fact, Clang and GCC use only a few combinations. For now,
+  // we support two of them, which is enough to pass at least all LLVM
+  // test-suite cases:
+  //   <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
+  //   <any relocation> / R_MIPS_64 / R_MIPS_NONE
+  // The first relocation is a 'real' relocation, which is calculated
+  // using the corresponding symbol's value. The second and third
+  // relocations are used to modify the result of the first one: extend it to
+  // 64 bits, extract the high or low part, etc. For details, see section 2.9
+  // "Relocation" in https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
+  uint32_t Type2 = (Type >> 8) & 0xff;
+  uint32_t Type3 = (Type >> 16) & 0xff;
+  if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
+    return std::make_pair(Type, Val);
+  if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
+    return std::make_pair(Type2, Val);
+  if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
+    return std::make_pair(Type3, -Val);
+  error(getErrorLocation(Loc) + "unsupported relocations combination " +
+        Twine(Type));
+  return std::make_pair(Type & 0xff, Val);
+}
+
+template <class ELFT>
+void MIPS<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  const endianness E = ELFT::TargetEndianness;
+  // Thread pointer and DTP offsets from the start of the TLS data area.
+  // https://www.linux-mips.org/wiki/NPTL
+  if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16 ||
+      Type == R_MIPS_TLS_DTPREL32 || Type == R_MIPS_TLS_DTPREL64)
+    Val -= 0x8000;
+  else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16 ||
+           Type == R_MIPS_TLS_TPREL32 || Type == R_MIPS_TLS_TPREL64)
+    Val -= 0x7000;
+  if (ELFT::Is64Bits || Config->MipsN32Abi)
+    std::tie(Type, Val) = calculateMipsRelChain(Loc, Type, Val);
+  switch (Type) {
+  case R_MIPS_32:
+  case R_MIPS_GPREL32:
+  case R_MIPS_TLS_DTPREL32:
+  case R_MIPS_TLS_TPREL32:
+    write32<E>(Loc, Val);
+    break;
+  case R_MIPS_64:
+  case R_MIPS_TLS_DTPREL64:
+  case R_MIPS_TLS_TPREL64:
+    write64<E>(Loc, Val);
+    break;
+  case R_MIPS_26:
+    write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | ((Val >> 2) & 0x3ffffff));
+    break;
+  case R_MIPS_GOT16:
+    // In "relocatable" linking mode the R_MIPS_GOT16 relocation's value is
+    // the updated addend (not a GOT index). In that case, write the high 16
+    // bits to store a correct addend value.
+    if (Config->Relocatable)
+      writeMipsHi16<E>(Loc, Val);
+    else {
+      checkInt<16>(Loc, Val, Type);
+      writeMipsLo16<E>(Loc, Val);
+    }
+    break;
+  case R_MIPS_GOT_DISP:
+  case R_MIPS_GOT_PAGE:
+  case R_MIPS_GPREL16:
+  case R_MIPS_TLS_GD:
+  case R_MIPS_TLS_LDM:
+    checkInt<16>(Loc, Val, Type);
+    LLVM_FALLTHROUGH;
+  case R_MIPS_CALL16:
+  case R_MIPS_CALL_LO16:
+  case R_MIPS_GOT_LO16:
+  case R_MIPS_GOT_OFST:
+  case R_MIPS_LO16:
+  case R_MIPS_PCLO16:
+  case R_MIPS_TLS_DTPREL_LO16:
+  case R_MIPS_TLS_GOTTPREL:
+  case R_MIPS_TLS_TPREL_LO16:
+    writeMipsLo16<E>(Loc, Val);
+    break;
+  case R_MIPS_CALL_HI16:
+  case R_MIPS_GOT_HI16:
+  case R_MIPS_HI16:
+  case R_MIPS_PCHI16:
+  case R_MIPS_TLS_DTPREL_HI16:
+  case R_MIPS_TLS_TPREL_HI16:
+    writeMipsHi16<E>(Loc, Val);
+    break;
+  case R_MIPS_HIGHER:
+    writeMipsHigher<E>(Loc, Val);
+    break;
+  case R_MIPS_HIGHEST:
+    writeMipsHighest<E>(Loc, Val);
+    break;
+  case R_MIPS_JALR:
+    // Ignore this optimization relocation for now
+    break;
+  case R_MIPS_PC16:
+    applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
+    break;
+  case R_MIPS_PC19_S2:
+    applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
+    break;
+  case R_MIPS_PC21_S2:
+    applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
+    break;
+  case R_MIPS_PC26_S2:
+    applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
+    break;
+  case R_MIPS_PC32:
+    applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
+    break;
+  default:
+    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+  }
+}
+
+template <class ELFT>
+bool MIPS<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
+  return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
+}
+
+template <class ELFT> TargetInfo *elf::createMipsTargetInfo() {
+  return make<MIPS<ELFT>>();
+}
+
+template TargetInfo *elf::createMipsTargetInfo<ELF32LE>();
+template TargetInfo *elf::createMipsTargetInfo<ELF32BE>();
+template TargetInfo *elf::createMipsTargetInfo<ELF64LE>();
+template TargetInfo *elf::createMipsTargetInfo<ELF64BE>();
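
As the calculateMipsRelChain() comment above notes, an N64 relocation record
carries up to three 8-bit relocation types in one field, with the first ('real')
type in the lowest byte. A minimal sketch of the unpacking, using made-up byte
values (plain C++, not LLD code):

    // Standalone sketch of unpacking a MIPS N64 packed relocation type;
    // not LLD code.
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical packed value: first/second/third types 0x14/0x24/0x05.
      uint32_t Packed = 0x14 | (0x24 << 8) | (0x05 << 16);
      uint32_t Type1 = Packed & 0xff;         // 'real' relocation
      uint32_t Type2 = (Packed >> 8) & 0xff;  // modifier, e.g. R_MIPS_SUB
      uint32_t Type3 = (Packed >> 16) & 0xff; // modifier, e.g. R_MIPS_HI16
      printf("%#x %#x %#x\n", Type1, Type2, Type3); // 0x14 0x24 0x5
    }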

Added: lld/trunk/ELF/Arch/PPC.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/PPC.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/PPC.cpp (added)
+++ lld/trunk/ELF/Arch/PPC.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,63 @@
+//===- PPC.cpp ------------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "Memory.h"
+#include "Symbols.h"
+#include "Target.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+namespace {
+class PPC final : public TargetInfo {
+public:
+  PPC() {}
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+};
+} // namespace
+
+void PPC::relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  switch (Type) {
+  case R_PPC_ADDR16_HA:
+    write16be(Loc, (Val + 0x8000) >> 16);
+    break;
+  case R_PPC_ADDR16_LO:
+    write16be(Loc, Val);
+    break;
+  case R_PPC_ADDR32:
+  case R_PPC_REL32:
+    write32be(Loc, Val);
+    break;
+  case R_PPC_REL24:
+    write32be(Loc, read32be(Loc) | (Val & 0x3FFFFFC));
+    break;
+  default:
+    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+  }
+}
+
+RelExpr PPC::getRelExpr(uint32_t Type, const SymbolBody &S,
+                        const uint8_t *Loc) const {
+  switch (Type) {
+  case R_PPC_REL24:
+  case R_PPC_REL32:
+    return R_PC;
+  default:
+    return R_ABS;
+  }
+}
+
+TargetInfo *elf::createPPCTargetInfo() { return make<PPC>(); }

Added: lld/trunk/ELF/Arch/PPC64.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/PPC64.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/PPC64.cpp (added)
+++ lld/trunk/ELF/Arch/PPC64.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,215 @@
+//===- PPC64.cpp ----------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "Memory.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+static uint64_t PPC64TocOffset = 0x8000;
+
+uint64_t elf::getPPC64TocBase() {
+  // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
+  // TOC starts where the first of these sections starts. We always create a
+  // .got when we see a relocation that uses it, so for us the start is always
+  // the .got.
+  uint64_t TocVA = InX::Got->getVA();
+
+  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
+  // thus permitting a full 64 KB segment. Note that the glibc startup
+  // code (crt1.o) assumes that you can get from the TOC base to the
+  // start of the .toc section with only a single (signed) 16-bit relocation.
+  return TocVA + PPC64TocOffset;
+}
+
+namespace {
+class PPC64 final : public TargetInfo {
+public:
+  PPC64();
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
+                int32_t Index, unsigned RelOff) const override;
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+};
+} // namespace
+
+// Relocation masks following the #lo(value), #hi(value), #ha(value),
+// #higher(value), #highera(value), #highest(value), and #highesta(value)
+// macros defined in section 4.5.1, "Relocation Types", of the PPC-elf64abi
+// document.
+static uint16_t applyPPCLo(uint64_t V) { return V; }
+static uint16_t applyPPCHi(uint64_t V) { return V >> 16; }
+static uint16_t applyPPCHa(uint64_t V) { return (V + 0x8000) >> 16; }
+static uint16_t applyPPCHigher(uint64_t V) { return V >> 32; }
+static uint16_t applyPPCHighera(uint64_t V) { return (V + 0x8000) >> 32; }
+static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; }
+static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; }
+
+PPC64::PPC64() {
+  PltRel = GotRel = R_PPC64_GLOB_DAT;
+  RelativeRel = R_PPC64_RELATIVE;
+  GotEntrySize = 8;
+  GotPltEntrySize = 8;
+  PltEntrySize = 32;
+  PltHeaderSize = 0;
+
+  // We need 64K pages (at least under glibc/Linux, the loader won't
+  // set different permissions on a finer granularity than that).
+  DefaultMaxPageSize = 65536;
+
+  // The PPC64 ELF ABI v1 spec says:
+  //
+  //   It is normally desirable to put segments with different characteristics
+  //   in separate 256 Mbyte portions of the address space, to give the
+  //   operating system full paging flexibility in the 64-bit address space.
+  //
+  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
+  // use 0x10000000 as the starting address.
+  DefaultImageBase = 0x10000000;
+}
+
+RelExpr PPC64::getRelExpr(uint32_t Type, const SymbolBody &S,
+                          const uint8_t *Loc) const {
+  switch (Type) {
+  default:
+    return R_ABS;
+  case R_PPC64_TOC16:
+  case R_PPC64_TOC16_DS:
+  case R_PPC64_TOC16_HA:
+  case R_PPC64_TOC16_HI:
+  case R_PPC64_TOC16_LO:
+  case R_PPC64_TOC16_LO_DS:
+    return R_GOTREL;
+  case R_PPC64_TOC:
+    return R_PPC_TOC;
+  case R_PPC64_REL24:
+    return R_PPC_PLT_OPD;
+  }
+}
+
+void PPC64::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
+                     uint64_t PltEntryAddr, int32_t Index,
+                     unsigned RelOff) const {
+  uint64_t Off = GotPltEntryAddr - getPPC64TocBase();
+
+  // FIXME: What we should do, in theory, is get the offset of the function
+  // descriptor in the .opd section, and use that as the offset from %r2 (the
+  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
+  // be a pointer to the function descriptor in the .opd section. Using
+  // this scheme is simpler, but requires an extra indirection per PLT dispatch.
+
+  write32be(Buf, 0xf8410028);                       // std %r2, 40(%r1)
+  write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
+  write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
+  write32be(Buf + 12, 0xe96c0000);                  // ld %r11,0(%r12)
+  write32be(Buf + 16, 0x7d6903a6);                  // mtctr %r11
+  write32be(Buf + 20, 0xe84c0008);                  // ld %r2,8(%r12)
+  write32be(Buf + 24, 0xe96c0010);                  // ld %r11,16(%r12)
+  write32be(Buf + 28, 0x4e800420);                  // bctr
+}
+
+static std::pair<uint32_t, uint64_t> toAddr16Rel(uint32_t Type, uint64_t Val) {
+  uint64_t V = Val - PPC64TocOffset;
+  switch (Type) {
+  case R_PPC64_TOC16:
+    return {R_PPC64_ADDR16, V};
+  case R_PPC64_TOC16_DS:
+    return {R_PPC64_ADDR16_DS, V};
+  case R_PPC64_TOC16_HA:
+    return {R_PPC64_ADDR16_HA, V};
+  case R_PPC64_TOC16_HI:
+    return {R_PPC64_ADDR16_HI, V};
+  case R_PPC64_TOC16_LO:
+    return {R_PPC64_ADDR16_LO, V};
+  case R_PPC64_TOC16_LO_DS:
+    return {R_PPC64_ADDR16_LO_DS, V};
+  default:
+    return {Type, Val};
+  }
+}
+
+void PPC64::relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  // For a TOC-relative relocation, proceed in terms of the corresponding
+  // ADDR16 relocation type.
+  std::tie(Type, Val) = toAddr16Rel(Type, Val);
+
+  switch (Type) {
+  case R_PPC64_ADDR14: {
+    checkAlignment<4>(Loc, Val, Type);
+    // Preserve the AA/LK bits in the branch instruction
+    uint8_t AALK = Loc[3];
+    write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
+    break;
+  }
+  case R_PPC64_ADDR16:
+    checkInt<16>(Loc, Val, Type);
+    write16be(Loc, Val);
+    break;
+  case R_PPC64_ADDR16_DS:
+    checkInt<16>(Loc, Val, Type);
+    write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
+    break;
+  case R_PPC64_ADDR16_HA:
+  case R_PPC64_REL16_HA:
+    write16be(Loc, applyPPCHa(Val));
+    break;
+  case R_PPC64_ADDR16_HI:
+  case R_PPC64_REL16_HI:
+    write16be(Loc, applyPPCHi(Val));
+    break;
+  case R_PPC64_ADDR16_HIGHER:
+    write16be(Loc, applyPPCHigher(Val));
+    break;
+  case R_PPC64_ADDR16_HIGHERA:
+    write16be(Loc, applyPPCHighera(Val));
+    break;
+  case R_PPC64_ADDR16_HIGHEST:
+    write16be(Loc, applyPPCHighest(Val));
+    break;
+  case R_PPC64_ADDR16_HIGHESTA:
+    write16be(Loc, applyPPCHighesta(Val));
+    break;
+  case R_PPC64_ADDR16_LO:
+    write16be(Loc, applyPPCLo(Val));
+    break;
+  case R_PPC64_ADDR16_LO_DS:
+  case R_PPC64_REL16_LO:
+    write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
+    break;
+  case R_PPC64_ADDR32:
+  case R_PPC64_REL32:
+    checkInt<32>(Loc, Val, Type);
+    write32be(Loc, Val);
+    break;
+  case R_PPC64_ADDR64:
+  case R_PPC64_REL64:
+  case R_PPC64_TOC:
+    write64be(Loc, Val);
+    break;
+  case R_PPC64_REL24: {
+    uint32_t Mask = 0x03FFFFFC;
+    checkInt<24>(Loc, Val, Type);
+    write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
+    break;
+  }
+  default:
+    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+  }
+}
+
+TargetInfo *elf::createPPC64TargetInfo() { return make<PPC64>(); }

Added: lld/trunk/ELF/Arch/X86.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/X86.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/X86.cpp (added)
+++ lld/trunk/ELF/Arch/X86.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,363 @@
+//===- X86.cpp ------------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "InputFiles.h"
+#include "Memory.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+namespace {
+class X86 final : public TargetInfo {
+public:
+  X86();
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
+  void writeGotPltHeader(uint8_t *Buf) const override;
+  uint32_t getDynRel(uint32_t Type) const override;
+  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
+  void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
+  void writePltHeader(uint8_t *Buf) const override;
+  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
+                int32_t Index, unsigned RelOff) const override;
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+
+  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
+                          RelExpr Expr) const override;
+  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+};
+} // namespace
+
+X86::X86() {
+  CopyRel = R_386_COPY;
+  GotRel = R_386_GLOB_DAT;
+  PltRel = R_386_JUMP_SLOT;
+  IRelativeRel = R_386_IRELATIVE;
+  RelativeRel = R_386_RELATIVE;
+  TlsGotRel = R_386_TLS_TPOFF;
+  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
+  TlsOffsetRel = R_386_TLS_DTPOFF32;
+  GotEntrySize = 4;
+  GotPltEntrySize = 4;
+  PltEntrySize = 16;
+  PltHeaderSize = 16;
+  TlsGdRelaxSkip = 2;
+
+  // 0xCC is the "int3" (call debug exception handler) instruction.
+  TrapInstr = 0xcccccccc;
+}
+
+RelExpr X86::getRelExpr(uint32_t Type, const SymbolBody &S,
+                        const uint8_t *Loc) const {
+  switch (Type) {
+  case R_386_8:
+  case R_386_16:
+  case R_386_32:
+  case R_386_TLS_LDO_32:
+    return R_ABS;
+  case R_386_TLS_GD:
+    return R_TLSGD;
+  case R_386_TLS_LDM:
+    return R_TLSLD;
+  case R_386_PLT32:
+    return R_PLT_PC;
+  case R_386_PC8:
+  case R_386_PC16:
+  case R_386_PC32:
+    return R_PC;
+  case R_386_GOTPC:
+    return R_GOTONLY_PC_FROM_END;
+  case R_386_TLS_IE:
+    return R_GOT;
+  case R_386_GOT32:
+  case R_386_GOT32X:
+    // These relocations can be calculated in two different ways.
+    // The usual calculation is G + A - GOT, which means an offset into the
+    // GOT table (R_GOT_FROM_END). When the instruction the relocation applies
+    // to has no base register, the relocation can only be used when PIC code
+    // is disabled. In that case the calculation is G + A, which resolves to
+    // the address of the GOT entry (R_GOT), not an offset.
+    //
+    // To check that the instruction has no base register, we scan its ModR/M
+    // byte.
+    // See "Table 2-2. 32-Bit Addressing Forms with the ModR/M Byte"
+    // (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
+    //  64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
+    if ((Loc[-1] & 0xc7) != 0x5)
+      return R_GOT_FROM_END;
+    if (Config->Pic)
+      error(toString(S.File) + ": relocation " + toString(Type) + " against '" +
+            S.getName() +
+            "' without base register can not be used when PIC enabled");
+    return R_GOT;
+  case R_386_TLS_GOTIE:
+    return R_GOT_FROM_END;
+  case R_386_GOTOFF:
+    return R_GOTREL_FROM_END;
+  case R_386_TLS_LE:
+    return R_TLS;
+  case R_386_TLS_LE_32:
+    return R_NEG_TLS;
+  case R_386_NONE:
+    return R_NONE;
+  default:
+    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
+    return R_HINT;
+  }
+}
+
+RelExpr X86::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
+                             RelExpr Expr) const {
+  switch (Expr) {
+  default:
+    return Expr;
+  case R_RELAX_TLS_GD_TO_IE:
+    return R_RELAX_TLS_GD_TO_IE_END;
+  case R_RELAX_TLS_GD_TO_LE:
+    return R_RELAX_TLS_GD_TO_LE_NEG;
+  }
+}
+
+void X86::writeGotPltHeader(uint8_t *Buf) const {
+  write32le(Buf, InX::Dynamic->getVA());
+}
+
+void X86::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
+  // Entries in .got.plt initially point back to the corresponding
+  // PLT entries with a fixed offset to skip the first instruction.
+  write32le(Buf, S.getPltVA() + 6);
+}
+
+void X86::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
+  // An x86 entry is the address of the ifunc resolver function.
+  write32le(Buf, S.getVA());
+}
+
+uint32_t X86::getDynRel(uint32_t Type) const {
+  if (Type == R_386_TLS_LE)
+    return R_386_TLS_TPOFF;
+  if (Type == R_386_TLS_LE_32)
+    return R_386_TLS_TPOFF32;
+  return Type;
+}
+
+void X86::writePltHeader(uint8_t *Buf) const {
+  if (Config->Pic) {
+    const uint8_t V[] = {
+        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl GOTPLT+4(%ebx)
+        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *GOTPLT+8(%ebx)
+        0x90, 0x90, 0x90, 0x90              // nop
+    };
+    memcpy(Buf, V, sizeof(V));
+
+    uint32_t Ebx = InX::Got->getVA() + InX::Got->getSize();
+    uint32_t GotPlt = InX::GotPlt->getVA() - Ebx;
+    write32le(Buf + 2, GotPlt + 4);
+    write32le(Buf + 8, GotPlt + 8);
+    return;
+  }
+
+  const uint8_t PltData[] = {
+      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOTPLT+4)
+      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *(GOTPLT+8)
+      0x90, 0x90, 0x90, 0x90              // nop
+  };
+  memcpy(Buf, PltData, sizeof(PltData));
+  uint32_t GotPlt = InX::GotPlt->getVA();
+  write32le(Buf + 2, GotPlt + 4);
+  write32le(Buf + 8, GotPlt + 8);
+}
+
+void X86::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
+                   uint64_t PltEntryAddr, int32_t Index,
+                   unsigned RelOff) const {
+  const uint8_t Inst[] = {
+      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
+      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
+      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
+  };
+  memcpy(Buf, Inst, sizeof(Inst));
+
+  if (Config->Pic) {
+    // jmp *foo@GOT(%ebx)
+    uint32_t Ebx = InX::Got->getVA() + InX::Got->getSize();
+    Buf[1] = 0xa3;
+    write32le(Buf + 2, GotPltEntryAddr - Ebx);
+  } else {
+    // jmp *foo_in_GOT
+    Buf[1] = 0x25;
+    write32le(Buf + 2, GotPltEntryAddr);
+  }
+
+  write32le(Buf + 7, RelOff);
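+  // The rel32 of the final jmp is relative to the end of this 16-byte entry,
+  // so the displacement back to PLT[0] is
+  // -(PltHeaderSize + Index * PltEntrySize + 16).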
+  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
+}
+
+int64_t X86::getImplicitAddend(const uint8_t *Buf, uint32_t Type) const {
+  switch (Type) {
+  default:
+    return 0;
+  case R_386_8:
+  case R_386_PC8:
+    return SignExtend64<8>(*Buf);
+  case R_386_16:
+  case R_386_PC16:
+    return SignExtend64<16>(read16le(Buf));
+  case R_386_32:
+  case R_386_GOT32:
+  case R_386_GOT32X:
+  case R_386_GOTOFF:
+  case R_386_GOTPC:
+  case R_386_PC32:
+  case R_386_PLT32:
+  case R_386_TLS_LDO_32:
+  case R_386_TLS_LE:
+    return SignExtend64<32>(read32le(Buf));
+  }
+}
+
+void X86::relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  // R_386_{PC,}{8,16} are not part of the i386 psABI, but they are
+  // being used for some 16-bit programs such as boot loaders, so
+  // we want to support them.
+  switch (Type) {
+  case R_386_8:
+    checkUInt<8>(Loc, Val, Type);
+    *Loc = Val;
+    break;
+  case R_386_PC8:
+    checkInt<8>(Loc, Val, Type);
+    *Loc = Val;
+    break;
+  case R_386_16:
+    checkUInt<16>(Loc, Val, Type);
+    write16le(Loc, Val);
+    break;
+  case R_386_PC16:
+    // R_386_PC16 is normally used with 16 bit code. In that situation
+    // the PC is 16 bits, just like the addend. This means that it can
+    // point from any 16 bit address to any other if the possibility
+    // of wrapping is included.
+    // The only restriction we have to check then is that the destination
+    // address fits in 16 bits. That is impossible to do here. The problem is
+    // that we are passed the final value, which already had the
+    // current location subtracted from it.
+    // We just check that Val fits in 17 bits. This misses some cases, but
+    // should have no false positives.
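+    // For example, a branch at address 0xff00 whose destination is 0x0010
+    // gives Val of roughly 0x0010 - 0xff00 = -0xfef0: a legitimate 16-bit
+    // branch once wrapping is considered, yet it does not fit in a signed
+    // 16-bit field, only in 17 bits.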
+    checkInt<17>(Loc, Val, Type);
+    write16le(Loc, Val);
+    break;
+  default:
+    checkInt<32>(Loc, Val, Type);
+    write32le(Loc, Val);
+  }
+}
+
+void X86::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  // Convert
+  //   leal x@tlsgd(, %ebx, 1),
+  //   call __tls_get_addr@plt
+  // to
+  //   movl %gs:0,%eax
+  //   subl $x@ntpoff,%eax
+  const uint8_t Inst[] = {
+      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
+      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
+  };
+  memcpy(Loc - 3, Inst, sizeof(Inst));
+  write32le(Loc + 5, Val);
+}
+
+void X86::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  // Convert
+  //   leal x@tlsgd(, %ebx, 1),
+  //   call __tls_get_addr@plt
+  // to
+  //   movl %gs:0, %eax
+  //   addl x@gotntpoff(%ebx), %eax
+  const uint8_t Inst[] = {
+      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
+      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
+  };
+  memcpy(Loc - 3, Inst, sizeof(Inst));
+  write32le(Loc + 5, Val);
+}
+
+// In some conditions, relocations can be optimized to avoid using the GOT.
+// This function does that for the Initial Exec to Local Exec case.
+void X86::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  // Section 6.2 of Ulrich Drepper's TLS document says that @gotntpoff can
+  // be used with MOVL or ADDL instructions.
+  // @indntpoff is similar to @gotntpoff, but for use in
+  // position-dependent code.
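+  // The destination register is encoded in MODRM.reg (bits 5:3 of the
+  // ModR/M byte); it is unused for the one-byte-opcode %eax form below.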
+  uint8_t Reg = (Loc[-1] >> 3) & 7;
+
+  if (Type == R_386_TLS_IE) {
+    if (Loc[-1] == 0xa1) {
+      // "movl foo at indntpoff,%eax" -> "movl $foo,%eax"
+      // This case is different from the generic case below because
+      // this is a 5 byte instruction while below is 6 bytes.
+      Loc[-1] = 0xb8;
+    } else if (Loc[-2] == 0x8b) {
+      // "movl foo at indntpoff,%reg" -> "movl $foo,%reg"
+      Loc[-2] = 0xc7;
+      Loc[-1] = 0xc0 | Reg;
+    } else {
+      // "addl foo at indntpoff,%reg" -> "addl $foo,%reg"
+      Loc[-2] = 0x81;
+      Loc[-1] = 0xc0 | Reg;
+    }
+  } else {
+    assert(Type == R_386_TLS_GOTIE);
+    if (Loc[-2] == 0x8b) {
+      // "movl foo at gottpoff(%rip),%reg" -> "movl $foo,%reg"
+      Loc[-2] = 0xc7;
+      Loc[-1] = 0xc0 | Reg;
+    } else {
+      // "addl foo at gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
+      Loc[-2] = 0x8d;
+      Loc[-1] = 0x80 | (Reg << 3) | Reg;
+    }
+  }
+  write32le(Loc, Val);
+}
+
+void X86::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const {
+  if (Type == R_386_TLS_LDO_32) {
+    write32le(Loc, Val);
+    return;
+  }
+
+  // Convert
+  //   leal foo(%reg),%eax
+  //   call ___tls_get_addr
+  // to
+  //   movl %gs:0,%eax
+  //   nop
+  //   leal 0(%esi,1),%esi
+  const uint8_t Inst[] = {
+      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
+      0x90,                               // nop
+      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
+  };
+  memcpy(Loc - 2, Inst, sizeof(Inst));
+}
+
+TargetInfo *elf::createX86TargetInfo() { return make<X86>(); }

Added: lld/trunk/ELF/Arch/X86_64.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Arch/X86_64.cpp?rev=305565&view=auto
==============================================================================
--- lld/trunk/ELF/Arch/X86_64.cpp (added)
+++ lld/trunk/ELF/Arch/X86_64.cpp Fri Jun 16 12:32:43 2017
@@ -0,0 +1,468 @@
+//===- X86_64.cpp ---------------------------------------------------------===//
+//
+//                             The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Error.h"
+#include "InputFiles.h"
+#include "Memory.h"
+#include "Symbols.h"
+#include "SyntheticSections.h"
+#include "Target.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Endian.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::support::endian;
+using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+namespace {
+template <class ELFT> class X86_64 final : public TargetInfo {
+public:
+  X86_64();
+  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
+                     const uint8_t *Loc) const override;
+  bool isPicRel(uint32_t Type) const override;
+  void writeGotPltHeader(uint8_t *Buf) const override;
+  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
+  void writePltHeader(uint8_t *Buf) const override;
+  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
+                int32_t Index, unsigned RelOff) const override;
+  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+
+  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
+                          RelExpr Expr) const override;
+  void relaxGot(uint8_t *Loc, uint64_t Val) const override;
+  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
+
+private:
+  void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
+                     uint8_t ModRm) const;
+};
+} // namespace
+
+template <class ELFT> X86_64<ELFT>::X86_64() {
+  CopyRel = R_X86_64_COPY;
+  GotRel = R_X86_64_GLOB_DAT;
+  PltRel = R_X86_64_JUMP_SLOT;
+  RelativeRel = R_X86_64_RELATIVE;
+  IRelativeRel = R_X86_64_IRELATIVE;
+  TlsGotRel = R_X86_64_TPOFF64;
+  TlsModuleIndexRel = R_X86_64_DTPMOD64;
+  TlsOffsetRel = R_X86_64_DTPOFF64;
+  GotEntrySize = 8;
+  GotPltEntrySize = 8;
+  PltEntrySize = 16;
+  PltHeaderSize = 16;
+  TlsGdRelaxSkip = 2;
+
+  // Align to the large page size (known as a superpage or huge page).
+  // FreeBSD automatically promotes large, superpage-aligned allocations.
+  DefaultImageBase = 0x200000;
+
+  // 0xCC is the "int3" (call debug exception handler) instruction.
+  TrapInstr = 0xcccccccc;
+}
+
+template <class ELFT>
+RelExpr X86_64<ELFT>::getRelExpr(uint32_t Type, const SymbolBody &S,
+                                 const uint8_t *Loc) const {
+  switch (Type) {
+  case R_X86_64_8:
+  case R_X86_64_16:
+  case R_X86_64_32:
+  case R_X86_64_32S:
+  case R_X86_64_64:
+  case R_X86_64_DTPOFF32:
+  case R_X86_64_DTPOFF64:
+    return R_ABS;
+  case R_X86_64_TPOFF32:
+    return R_TLS;
+  case R_X86_64_TLSLD:
+    return R_TLSLD_PC;
+  case R_X86_64_TLSGD:
+    return R_TLSGD_PC;
+  case R_X86_64_SIZE32:
+  case R_X86_64_SIZE64:
+    return R_SIZE;
+  case R_X86_64_PLT32:
+    return R_PLT_PC;
+  case R_X86_64_PC32:
+  case R_X86_64_PC64:
+    return R_PC;
+  case R_X86_64_GOT32:
+  case R_X86_64_GOT64:
+    return R_GOT_FROM_END;
+  case R_X86_64_GOTPCREL:
+  case R_X86_64_GOTPCRELX:
+  case R_X86_64_REX_GOTPCRELX:
+  case R_X86_64_GOTTPOFF:
+    return R_GOT_PC;
+  case R_X86_64_NONE:
+    return R_NONE;
+  default:
+    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
+    return R_HINT;
+  }
+}
+
+template <class ELFT> void X86_64<ELFT>::writeGotPltHeader(uint8_t *Buf) const {
+  // The first entry holds the value of _DYNAMIC. It is not clear why that is
+  // required, but it is documented in the psabi and the glibc dynamic linker
+  // seems to use it (note that this is relevant for linking ld.so, not any
+  // other program).
+  write64le(Buf, InX::Dynamic->getVA());
+}
+
+template <class ELFT>
+void X86_64<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
+  // See comments in X86::writeGotPlt.
+  write32le(Buf, S.getPltVA() + 6);
+}
+
+template <class ELFT> void X86_64<ELFT>::writePltHeader(uint8_t *Buf) const {
+  const uint8_t PltData[] = {
+      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOTPLT+8(%rip)
+      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOTPLT+16(%rip)
+      0x0f, 0x1f, 0x40, 0x00              // nop
+  };
+  memcpy(Buf, PltData, sizeof(PltData));
+  uint64_t GotPlt = InX::GotPlt->getVA();
+  uint64_t Plt = InX::Plt->getVA();
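+  // Both fields are RIP-relative: the first displacement ends at Plt + 6 and
+  // targets GOTPLT+8, the second ends at Plt + 12 and targets GOTPLT+16,
+  // hence the +2 and +4 below.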
+  write32le(Buf + 2, GotPlt - Plt + 2); // GOTPLT+8
+  write32le(Buf + 8, GotPlt - Plt + 4); // GOTPLT+16
+}
+
+template <class ELFT>
+void X86_64<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
+                            uint64_t PltEntryAddr, int32_t Index,
+                            unsigned RelOff) const {
+  const uint8_t Inst[] = {
+      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
+      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
+      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
+  };
+  memcpy(Buf, Inst, sizeof(Inst));
+
+  write32le(Buf + 2, GotPltEntryAddr - PltEntryAddr - 6);
+  write32le(Buf + 7, Index);
+  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
+}
+
+template <class ELFT> bool X86_64<ELFT>::isPicRel(uint32_t Type) const {
+  return Type != R_X86_64_PC32 && Type != R_X86_64_32 &&
+         Type != R_X86_64_TPOFF32;
+}
+
+template <class ELFT>
+void X86_64<ELFT>::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
+                                  uint64_t Val) const {
+  // Convert
+  //   .byte 0x66
+  //   leaq x@tlsgd(%rip), %rdi
+  //   .word 0x6666
+  //   rex64
+  //   call __tls_get_addr@plt
+  // to
+  //   mov %fs:0x0,%rax
+  //   lea x@tpoff,%rax
+  const uint8_t Inst[] = {
+      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
+      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x@tpoff,%rax
+  };
+  memcpy(Loc - 4, Inst, sizeof(Inst));
+
+  // The original code used a PC-relative relocation, so we have to
+  // compensate for the -4 it had in the addend.
+  write32le(Loc + 8, Val + 4);
+}
+
+template <class ELFT>
+void X86_64<ELFT>::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
+                                  uint64_t Val) const {
+  // Convert
+  //   .byte 0x66
+  //   leaq x@tlsgd(%rip), %rdi
+  //   .word 0x6666
+  //   rex64
+  //   call __tls_get_addr@plt
+  // to
+  //   mov %fs:0x0,%rax
+  //   addq x@gottpoff(%rip),%rax
+  const uint8_t Inst[] = {
+      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
+      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x@gottpoff(%rip),%rax
+  };
+  memcpy(Loc - 4, Inst, sizeof(Inst));
+
+  // Both code sequences are PC-relative, but since we are moving the relocated
+  // field forward by 8 bytes, we have to subtract 8 from the value.
+  write32le(Loc + 8, Val - 8);
+}
+
+// In some conditions, the R_X86_64_GOTTPOFF relocation can be optimized to
+// R_X86_64_TPOFF32 so that it does not use the GOT.
+template <class ELFT>
+void X86_64<ELFT>::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
+                                  uint64_t Val) const {
+  uint8_t *Inst = Loc - 3;
+  uint8_t Reg = Loc[-1] >> 3;
+  uint8_t *RegSlot = Loc - 1;
+
+  // Note that ADD with RSP or R12 is converted to an ADD with an immediate
+  // instead of an LEA, because an LEA with these registers needs 4 bytes to
+  // encode and thus wouldn't fit in the space.
+
+  if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
+    // "addq foo at gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
+    memcpy(Inst, "\x48\x81\xc4", 3);
+  } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
+    // "addq foo at gottpoff(%rip),%r12" -> "addq $foo,%r12"
+    memcpy(Inst, "\x49\x81\xc4", 3);
+  } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
+    // "addq foo at gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
+    memcpy(Inst, "\x4d\x8d", 2);
+    *RegSlot = 0x80 | (Reg << 3) | Reg;
+  } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
+    // "addq foo at gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
+    memcpy(Inst, "\x48\x8d", 2);
+    *RegSlot = 0x80 | (Reg << 3) | Reg;
+  } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
+    // "movq foo at gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
+    memcpy(Inst, "\x49\xc7", 2);
+    *RegSlot = 0xc0 | Reg;
+  } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
+    // "movq foo at gottpoff(%rip),%reg" -> "movq $foo,%reg"
+    memcpy(Inst, "\x48\xc7", 2);
+    *RegSlot = 0xc0 | Reg;
+  } else {
+    error(getErrorLocation(Loc - 3) +
+          "R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
+  }
+
+  // The original code used a PC relative relocation.
+  // Need to compensate for the -4 it had in the addend.
+  write32le(Loc, Val + 4);
+}
+
+template <class ELFT>
+void X86_64<ELFT>::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
+                                  uint64_t Val) const {
+  // Convert
+  //   leaq bar@tlsld(%rip), %rdi
+  //   callq __tls_get_addr@PLT
+  //   leaq bar@dtpoff(%rax), %rcx
+  // to
+  //   .word 0x6666
+  //   .byte 0x66
+  //   mov %fs:0,%rax
+  //   leaq bar@tpoff(%rax), %rcx
+  if (Type == R_X86_64_DTPOFF64) {
+    write64le(Loc, Val);
+    return;
+  }
+  if (Type == R_X86_64_DTPOFF32) {
+    write32le(Loc, Val);
+    return;
+  }
+
+  const uint8_t Inst[] = {
+      0x66, 0x66,                                          // .word 0x6666
+      0x66,                                                // .byte 0x66
+      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
+  };
+  memcpy(Loc - 3, Inst, sizeof(Inst));
+}
+
+template <class ELFT>
+void X86_64<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
+                               uint64_t Val) const {
+  switch (Type) {
+  case R_X86_64_8:
+    checkUInt<8>(Loc, Val, Type);
+    *Loc = Val;
+    break;
+  case R_X86_64_16:
+    checkUInt<16>(Loc, Val, Type);
+    write16le(Loc, Val);
+    break;
+  case R_X86_64_32:
+    checkUInt<32>(Loc, Val, Type);
+    write32le(Loc, Val);
+    break;
+  case R_X86_64_32S:
+  case R_X86_64_TPOFF32:
+  case R_X86_64_GOT32:
+  case R_X86_64_GOTPCREL:
+  case R_X86_64_GOTPCRELX:
+  case R_X86_64_REX_GOTPCRELX:
+  case R_X86_64_PC32:
+  case R_X86_64_GOTTPOFF:
+  case R_X86_64_PLT32:
+  case R_X86_64_TLSGD:
+  case R_X86_64_TLSLD:
+  case R_X86_64_DTPOFF32:
+  case R_X86_64_SIZE32:
+    checkInt<32>(Loc, Val, Type);
+    write32le(Loc, Val);
+    break;
+  case R_X86_64_64:
+  case R_X86_64_DTPOFF64:
+  case R_X86_64_GLOB_DAT:
+  case R_X86_64_PC64:
+  case R_X86_64_SIZE64:
+  case R_X86_64_GOT64:
+    write64le(Loc, Val);
+    break;
+  default:
+    llvm_unreachable("unexpected relocation");
+  }
+}
+
+template <class ELFT>
+RelExpr X86_64<ELFT>::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
+                                      RelExpr RelExpr) const {
+  if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
+    return RelExpr;
+  const uint8_t Op = Data[-2];
+  const uint8_t ModRm = Data[-1];
+
+  // FIXME: When PIC is disabled and foo is defined locally in the
+  // lower 32-bit address space, the memory operand in mov can be converted
+  // into an immediate operand. Otherwise, mov must be changed to lea. We
+  // support only the latter relaxation at the moment.
+  if (Op == 0x8b)
+    return R_RELAX_GOT_PC;
+
+  // Relax call and jmp.
+  if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
+    return R_RELAX_GOT_PC;
+
+  // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
+  // If PIC is enabled, no relaxation is available.
+  // We also don't relax test/binop instructions without a REX byte;
+  // they are 32-bit operations and not common to have.
+  assert(Type == R_X86_64_REX_GOTPCRELX);
+  return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
+}
+
+// A subset of relaxations can only be applied in non-PIC mode. This method
+// handles such relaxations. Instruction encoding information was taken from:
+// "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
+// (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
+//    64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
+template <class ELFT>
+void X86_64<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
+                                 uint8_t ModRm) const {
+  const uint8_t Rex = Loc[-3];
+  // Convert "test %reg, foo at GOTPCREL(%rip)" to "test $foo, %reg".
+  if (Op == 0x85) {
+    // See "TEST-Logical Compare" (4-428 Vol. 2B),
+    // TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension).
+
+    // The ModR/M byte has the form XX YYY ZZZ, where
+    // YYY is MODRM.reg (register 2) and ZZZ is MODRM.rm (register 1).
+    // XX has different meanings:
+    // 00: The operand's memory address is in reg1.
+    // 01: The operand's memory address is reg1 + a byte-sized displacement.
+    // 10: The operand's memory address is reg1 + a word-sized displacement.
+    // 11: The operand is reg1 itself.
+    // If an instruction requires only one operand, the unused reg2 field
+    // holds extra opcode bits rather than a register code.
+    // 0xC0 == 11 000 000 in binary.
+    // 0x38 == 00 111 000 in binary.
+    // We transfer reg2 to reg1 here as the operand.
+    // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
+    Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.
+
+    // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
+    // See "TEST-Logical Compare" (4-428 Vol. 2B).
+    Loc[-2] = 0xf7;
+
+    // Move R bit to the B bit in REX byte.
+    // REX byte is encoded as 0100WRXB, where
+    // 0100 is a 4-bit fixed pattern.
+    // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
+    //   default operand size is used (which is 32-bit for most but not all
+    //   instructions).
+    // REX.R This 1-bit value is an extension to the MODRM.reg field.
+    // REX.X This 1-bit value is an extension to the SIB.index field.
+    // REX.B This 1-bit value is an extension to the MODRM.rm field or the
+    // SIB.base field.
+    // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
+    Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
+    write32le(Loc, Val);
+    return;
+  }
+
+  // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub
+  // or xor operations.
+
+  // Convert "binop foo at GOTPCREL(%rip), %reg" to "binop $foo, %reg".
+  // Logic is close to one for test instruction above, but we also
+  // write opcode extension here, see below for details.
+  Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.
+
+  // The primary opcode is 0x81 and the opcode extension is one of:
+  // 000b is ADD, 001b is OR, 010b is ADC, 011b is SBB,
+  // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP.
+  // This value was written to MODRM.reg in the line above.
+  // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
+  // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
+  // descriptions about each operation.
+  Loc[-2] = 0x81;
+  Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
+  write32le(Loc, Val);
+}
+
+template <class ELFT>
+void X86_64<ELFT>::relaxGot(uint8_t *Loc, uint64_t Val) const {
+  const uint8_t Op = Loc[-2];
+  const uint8_t ModRm = Loc[-1];
+
+  // Convert "mov foo at GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
+  if (Op == 0x8b) {
+    Loc[-2] = 0x8d;
+    write32le(Loc, Val);
+    return;
+  }
+
+  if (Op != 0xff) {
+    // We are relaxing a RIP-relative reference to an absolute one, so
+    // compensate for the old -4 addend.
+    assert(!Config->Pic);
+    relaxGotNoPic(Loc, Val + 4, Op, ModRm);
+    return;
+  }
+
+  // Convert call/jmp instructions.
+  if (ModRm == 0x15) {
+    // The ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
+    // Instead we convert to "addr32 call foo", where addr32 is an instruction
+    // prefix. That makes the result a single instruction.
+    Loc[-2] = 0x67; // addr32 prefix
+    Loc[-1] = 0xe8; // call
+    write32le(Loc, Val);
+    return;
+  }
+
+  // Convert "jmp *foo at GOTPCREL(%rip)" to "jmp foo; nop".
+  // jmp doesn't return, so it is fine to use nop here, it is just a stub.
+  assert(ModRm == 0x25);
+  Loc[-2] = 0xe9; // jmp
+  Loc[3] = 0x90;  // nop
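+  // The displacement field moves one byte earlier (to Loc - 1), so add 1 to
+  // Val to keep the branch target unchanged.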
+  write32le(Loc - 1, Val + 1);
+}
+
+TargetInfo *elf::createX32TargetInfo() { return make<X86_64<ELF32LE>>(); }
+TargetInfo *elf::createX86_64TargetInfo() { return make<X86_64<ELF64LE>>(); }

Modified: lld/trunk/ELF/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/CMakeLists.txt?rev=305565&r1=305564&r2=305565&view=diff
==============================================================================
--- lld/trunk/ELF/CMakeLists.txt (original)
+++ lld/trunk/ELF/CMakeLists.txt Fri Jun 16 12:32:43 2017
@@ -7,6 +7,15 @@ if(NOT LLD_BUILT_STANDALONE)
 endif()
 
 add_lld_library(lldELF
+  Arch/AArch64.cpp
+  Arch/AMDGPU.cpp
+  Arch/ARM.cpp
+  Arch/AVR.cpp
+  Arch/Mips.cpp
+  Arch/PPC.cpp
+  Arch/PPC64.cpp
+  Arch/X86.cpp
+  Arch/X86_64.cpp
   Driver.cpp
   DriverUtils.cpp
   EhFrame.cpp

Modified: lld/trunk/ELF/Target.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Target.cpp?rev=305565&r1=305564&r2=305565&view=diff
==============================================================================
--- lld/trunk/ELF/Target.cpp (original)
+++ lld/trunk/ELF/Target.cpp Fri Jun 16 12:32:43 2017
@@ -27,22 +27,18 @@
 #include "Target.h"
 #include "Error.h"
 #include "InputFiles.h"
-#include "Memory.h"
 #include "OutputSections.h"
 #include "SymbolTable.h"
 #include "Symbols.h"
-#include "SyntheticSections.h"
-#include "Thunks.h"
-#include "Writer.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/BinaryFormat/ELF.h"
 #include "llvm/Object/ELF.h"
-#include "llvm/Support/Endian.h"
 
 using namespace llvm;
 using namespace llvm::object;
-using namespace llvm::support::endian;
 using namespace llvm::ELF;
+using namespace lld;
+using namespace lld::elf;
+
+TargetInfo *elf::Target;
 
 std::string lld::toString(uint32_t Type) {
   StringRef S = getELFRelocationTypeName(elf::Config->EMachine, Type);
@@ -51,13 +47,43 @@ std::string lld::toString(uint32_t Type)
   return S;
 }
 
-namespace lld {
-namespace elf {
-
-TargetInfo *Target;
-
-static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
-static void or32be(uint8_t *P, int32_t V) { write32be(P, read32be(P) | V); }
+TargetInfo *elf::createTarget() {
+  switch (Config->EMachine) {
+  case EM_386:
+  case EM_IAMCU:
+    return createX86TargetInfo();
+  case EM_AARCH64:
+    return createAArch64TargetInfo();
+  case EM_AMDGPU:
+    return createAMDGPUTargetInfo();
+  case EM_ARM:
+    return createARMTargetInfo();
+  case EM_AVR:
+    return createAVRTargetInfo();
+  case EM_MIPS:
+    switch (Config->EKind) {
+    case ELF32LEKind:
+      return createMipsTargetInfo<ELF32LE>();
+    case ELF32BEKind:
+      return createMipsTargetInfo<ELF32BE>();
+    case ELF64LEKind:
+      return createMipsTargetInfo<ELF64LE>();
+    case ELF64BEKind:
+      return createMipsTargetInfo<ELF64BE>();
+    default:
+      fatal("unsupported MIPS target");
+    }
+  case EM_PPC:
+    return createPPCTargetInfo();
+  case EM_PPC64:
+    return createPPC64TargetInfo();
+  case EM_X86_64:
+    if (Config->EKind == ELF32LEKind)
+      return createX32TargetInfo();
+    return createX86_64TargetInfo();
+  }
+  fatal("unknown target machine");
+}
 
 template <class ELFT> static std::string getErrorLoc(const uint8_t *Loc) {
   for (InputSectionBase *D : InputSections) {
@@ -72,7 +98,7 @@ template <class ELFT> static std::string
   return "";
 }
 
-static std::string getErrorLocation(const uint8_t *Loc) {
+std::string elf::getErrorLocation(const uint8_t *Loc) {
   switch (Config->EKind) {
   case ELF32LEKind:
     return getErrorLoc<ELF32LE>(Loc);
@@ -87,213 +113,6 @@ static std::string getErrorLocation(cons
   }
 }
 
-template <unsigned N>
-static void checkInt(uint8_t *Loc, int64_t V, uint32_t Type) {
-  if (!isInt<N>(V))
-    error(getErrorLocation(Loc) + "relocation " + toString(Type) +
-          " out of range");
-}
-
-template <unsigned N>
-static void checkUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
-  if (!isUInt<N>(V))
-    error(getErrorLocation(Loc) + "relocation " + toString(Type) +
-          " out of range");
-}
-
-template <unsigned N>
-static void checkIntUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
-  if (!isInt<N>(V) && !isUInt<N>(V))
-    error(getErrorLocation(Loc) + "relocation " + toString(Type) +
-          " out of range");
-}
-
-template <unsigned N>
-static void checkAlignment(uint8_t *Loc, uint64_t V, uint32_t Type) {
-  if ((V & (N - 1)) != 0)
-    error(getErrorLocation(Loc) + "improper alignment for relocation " +
-          toString(Type));
-}
-
-namespace {
-class X86TargetInfo final : public TargetInfo {
-public:
-  X86TargetInfo();
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
-  void writeGotPltHeader(uint8_t *Buf) const override;
-  uint32_t getDynRel(uint32_t Type) const override;
-  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
-  void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
-  void writePltHeader(uint8_t *Buf) const override;
-  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
-                int32_t Index, unsigned RelOff) const override;
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-
-  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
-                          RelExpr Expr) const override;
-  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-};
-
-template <class ELFT> class X86_64TargetInfo final : public TargetInfo {
-public:
-  X86_64TargetInfo();
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-  bool isPicRel(uint32_t Type) const override;
-  void writeGotPltHeader(uint8_t *Buf) const override;
-  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
-  void writePltHeader(uint8_t *Buf) const override;
-  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
-                int32_t Index, unsigned RelOff) const override;
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-
-  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
-                          RelExpr Expr) const override;
-  void relaxGot(uint8_t *Loc, uint64_t Val) const override;
-  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-
-private:
-  void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
-                     uint8_t ModRm) const;
-};
-
-class PPCTargetInfo final : public TargetInfo {
-public:
-  PPCTargetInfo();
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-};
-
-class PPC64TargetInfo final : public TargetInfo {
-public:
-  PPC64TargetInfo();
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
-                int32_t Index, unsigned RelOff) const override;
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-};
-
-class AArch64TargetInfo final : public TargetInfo {
-public:
-  AArch64TargetInfo();
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-  bool isPicRel(uint32_t Type) const override;
-  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
-  void writePltHeader(uint8_t *Buf) const override;
-  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
-                int32_t Index, unsigned RelOff) const override;
-  bool usesOnlyLowPageBits(uint32_t Type) const override;
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  RelExpr adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
-                          RelExpr Expr) const override;
-  void relaxTlsGdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  void relaxTlsGdToIe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  void relaxTlsIeToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-};
-
-class AMDGPUTargetInfo final : public TargetInfo {
-public:
-  AMDGPUTargetInfo();
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-};
-
-class ARMTargetInfo final : public TargetInfo {
-public:
-  ARMTargetInfo();
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-  bool isPicRel(uint32_t Type) const override;
-  uint32_t getDynRel(uint32_t Type) const override;
-  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
-  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
-  void writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const override;
-  void writePltHeader(uint8_t *Buf) const override;
-  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
-                int32_t Index, unsigned RelOff) const override;
-  void addPltSymbols(InputSectionBase *IS, uint64_t Off) const override;
-  void addPltHeaderSymbols(InputSectionBase *ISD) const override;
-  bool needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
-                  const SymbolBody &S) const override;
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-};
-
-class AVRTargetInfo final : public TargetInfo {
-public:
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-};
-
-template <class ELFT> class MipsTargetInfo final : public TargetInfo {
-public:
-  MipsTargetInfo();
-  RelExpr getRelExpr(uint32_t Type, const SymbolBody &S,
-                     const uint8_t *Loc) const override;
-  int64_t getImplicitAddend(const uint8_t *Buf, uint32_t Type) const override;
-  bool isPicRel(uint32_t Type) const override;
-  uint32_t getDynRel(uint32_t Type) const override;
-  void writeGotPlt(uint8_t *Buf, const SymbolBody &S) const override;
-  void writePltHeader(uint8_t *Buf) const override;
-  void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
-                int32_t Index, unsigned RelOff) const override;
-  bool needsThunk(RelExpr Expr, uint32_t RelocType, const InputFile *File,
-                  const SymbolBody &S) const override;
-  void relocateOne(uint8_t *Loc, uint32_t Type, uint64_t Val) const override;
-  bool usesOnlyLowPageBits(uint32_t Type) const override;
-};
-} // anonymous namespace
-
-TargetInfo *createTarget() {
-  switch (Config->EMachine) {
-  case EM_386:
-  case EM_IAMCU:
-    return make<X86TargetInfo>();
-  case EM_AARCH64:
-    return make<AArch64TargetInfo>();
-  case EM_AMDGPU:
-    return make<AMDGPUTargetInfo>();
-  case EM_ARM:
-    return make<ARMTargetInfo>();
-  case EM_AVR:
-    return make<AVRTargetInfo>();
-  case EM_MIPS:
-    switch (Config->EKind) {
-    case ELF32LEKind:
-      return make<MipsTargetInfo<ELF32LE>>();
-    case ELF32BEKind:
-      return make<MipsTargetInfo<ELF32BE>>();
-    case ELF64LEKind:
-      return make<MipsTargetInfo<ELF64LE>>();
-    case ELF64BEKind:
-      return make<MipsTargetInfo<ELF64BE>>();
-    default:
-      fatal("unsupported MIPS target");
-    }
-  case EM_PPC:
-    return make<PPCTargetInfo>();
-  case EM_PPC64:
-    return make<PPC64TargetInfo>();
-  case EM_X86_64:
-    if (Config->EKind == ELF32LEKind)
-      return make<X86_64TargetInfo<ELF32LE>>();
-    return make<X86_64TargetInfo<ELF64LE>>();
-  }
-  fatal("unknown target machine");
-}
-
 TargetInfo::~TargetInfo() {}
 
 int64_t TargetInfo::getImplicitAddend(const uint8_t *Buf, uint32_t Type) const {
@@ -339,2117 +158,3 @@ void TargetInfo::relaxTlsLdToLe(uint8_t
                                 uint64_t Val) const {
   llvm_unreachable("Should not have claimed to be relaxable");
 }
-
-X86TargetInfo::X86TargetInfo() {
-  CopyRel = R_386_COPY;
-  GotRel = R_386_GLOB_DAT;
-  PltRel = R_386_JUMP_SLOT;
-  IRelativeRel = R_386_IRELATIVE;
-  RelativeRel = R_386_RELATIVE;
-  TlsGotRel = R_386_TLS_TPOFF;
-  TlsModuleIndexRel = R_386_TLS_DTPMOD32;
-  TlsOffsetRel = R_386_TLS_DTPOFF32;
-  GotEntrySize = 4;
-  GotPltEntrySize = 4;
-  PltEntrySize = 16;
-  PltHeaderSize = 16;
-  TlsGdRelaxSkip = 2;
-  // 0xCC is the "int3" (call debug exception handler) instruction.
-  TrapInstr = 0xcccccccc;
-}
-
-RelExpr X86TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                  const uint8_t *Loc) const {
-  switch (Type) {
-  case R_386_8:
-  case R_386_16:
-  case R_386_32:
-  case R_386_TLS_LDO_32:
-    return R_ABS;
-  case R_386_TLS_GD:
-    return R_TLSGD;
-  case R_386_TLS_LDM:
-    return R_TLSLD;
-  case R_386_PLT32:
-    return R_PLT_PC;
-  case R_386_PC8:
-  case R_386_PC16:
-  case R_386_PC32:
-    return R_PC;
-  case R_386_GOTPC:
-    return R_GOTONLY_PC_FROM_END;
-  case R_386_TLS_IE:
-    return R_GOT;
-  case R_386_GOT32:
-  case R_386_GOT32X:
-    // These relocations can be calculated in two different ways.
-    // Usual calculation is G + A - GOT what means an offset in GOT table
-    // (R_GOT_FROM_END). When instruction pointed by relocation has no base
-    // register, then relocations can be used when PIC code is disabled. In that
-    // case calculation is G + A, it resolves to an address of entry in GOT
-    // (R_GOT) and not an offset.
-    //
-    // To check that instruction has no base register we scan ModR/M byte.
-    // See "Table 2-2. 32-Bit Addressing Forms with the ModR/M Byte"
-    // (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
-    //  64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
-    if ((Loc[-1] & 0xc7) != 0x5)
-      return R_GOT_FROM_END;
-    if (Config->Pic)
-      error(toString(S.File) + ": relocation " + toString(Type) + " against '" +
-            S.getName() +
-            "' without base register can not be used when PIC enabled");
-    return R_GOT;
-  case R_386_TLS_GOTIE:
-    return R_GOT_FROM_END;
-  case R_386_GOTOFF:
-    return R_GOTREL_FROM_END;
-  case R_386_TLS_LE:
-    return R_TLS;
-  case R_386_TLS_LE_32:
-    return R_NEG_TLS;
-  case R_386_NONE:
-    return R_NONE;
-  default:
-    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
-    return R_HINT;
-  }
-}
-
-RelExpr X86TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
-                                       RelExpr Expr) const {
-  switch (Expr) {
-  default:
-    return Expr;
-  case R_RELAX_TLS_GD_TO_IE:
-    return R_RELAX_TLS_GD_TO_IE_END;
-  case R_RELAX_TLS_GD_TO_LE:
-    return R_RELAX_TLS_GD_TO_LE_NEG;
-  }
-}
-
-void X86TargetInfo::writeGotPltHeader(uint8_t *Buf) const {
-  write32le(Buf, InX::Dynamic->getVA());
-}
-
-void X86TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &S) const {
-  // Entries in .got.plt initially points back to the corresponding
-  // PLT entries with a fixed offset to skip the first instruction.
-  write32le(Buf, S.getPltVA() + 6);
-}
-
-void X86TargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
-  // An x86 entry is the address of the ifunc resolver function.
-  write32le(Buf, S.getVA());
-}
-
-uint32_t X86TargetInfo::getDynRel(uint32_t Type) const {
-  if (Type == R_386_TLS_LE)
-    return R_386_TLS_TPOFF;
-  if (Type == R_386_TLS_LE_32)
-    return R_386_TLS_TPOFF32;
-  return Type;
-}
-
-void X86TargetInfo::writePltHeader(uint8_t *Buf) const {
-  if (Config->Pic) {
-    const uint8_t V[] = {
-        0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl GOTPLT+4(%ebx)
-        0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *GOTPLT+8(%ebx)
-        0x90, 0x90, 0x90, 0x90              // nop
-    };
-    memcpy(Buf, V, sizeof(V));
-
-    uint32_t Ebx = InX::Got->getVA() + InX::Got->getSize();
-    uint32_t GotPlt = InX::GotPlt->getVA() - Ebx;
-    write32le(Buf + 2, GotPlt + 4);
-    write32le(Buf + 8, GotPlt + 8);
-    return;
-  }
-
-  const uint8_t PltData[] = {
-      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushl (GOTPLT+4)
-      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *(GOTPLT+8)
-      0x90, 0x90, 0x90, 0x90              // nop
-  };
-  memcpy(Buf, PltData, sizeof(PltData));
-  uint32_t GotPlt = InX::GotPlt->getVA();
-  write32le(Buf + 2, GotPlt + 4);
-  write32le(Buf + 8, GotPlt + 8);
-}
-
-void X86TargetInfo::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
-                             uint64_t PltEntryAddr, int32_t Index,
-                             unsigned RelOff) const {
-  const uint8_t Inst[] = {
-      0xff, 0x00, 0x00, 0x00, 0x00, 0x00, // jmp *foo_in_GOT|*foo@GOT(%ebx)
-      0x68, 0x00, 0x00, 0x00, 0x00,       // pushl $reloc_offset
-      0xe9, 0x00, 0x00, 0x00, 0x00        // jmp .PLT0@PC
-  };
-  memcpy(Buf, Inst, sizeof(Inst));
-
-  if (Config->Pic) {
-    // jmp *foo@GOT(%ebx)
-    uint32_t Ebx = InX::Got->getVA() + InX::Got->getSize();
-    Buf[1] = 0xa3;
-    write32le(Buf + 2, GotPltEntryAddr - Ebx);
-  } else {
-    // jmp *foo_in_GOT
-    Buf[1] = 0x25;
-    write32le(Buf + 2, GotPltEntryAddr);
-  }
-
-  write32le(Buf + 7, RelOff);
-  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
-}
-
-int64_t X86TargetInfo::getImplicitAddend(const uint8_t *Buf,
-                                         uint32_t Type) const {
-  switch (Type) {
-  default:
-    return 0;
-  case R_386_8:
-  case R_386_PC8:
-    return SignExtend64<8>(*Buf);
-  case R_386_16:
-  case R_386_PC16:
-    return SignExtend64<16>(read16le(Buf));
-  case R_386_32:
-  case R_386_GOT32:
-  case R_386_GOT32X:
-  case R_386_GOTOFF:
-  case R_386_GOTPC:
-  case R_386_PC32:
-  case R_386_PLT32:
-  case R_386_TLS_LDO_32:
-  case R_386_TLS_LE:
-    return SignExtend64<32>(read32le(Buf));
-  }
-}
-
-void X86TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
-                                uint64_t Val) const {
-  // R_386_{PC,}{8,16} are not part of the i386 psABI, but they are
-  // being used for some 16-bit programs such as boot loaders, so
-  // we want to support them.
-  switch (Type) {
-  case R_386_8:
-    checkUInt<8>(Loc, Val, Type);
-    *Loc = Val;
-    break;
-  case R_386_PC8:
-    checkInt<8>(Loc, Val, Type);
-    *Loc = Val;
-    break;
-  case R_386_16:
-    checkUInt<16>(Loc, Val, Type);
-    write16le(Loc, Val);
-    break;
-  case R_386_PC16:
-    // R_386_PC16 is normally used with 16 bit code. In that situation
-    // the PC is 16 bits, just like the addend. This means that it can
-    // point from any 16 bit address to any other if the possibility
-    // of wrapping is included.
-    // The only restriction we have to check then is that the destination
-    // address fits in 16 bits. That is impossible to do here. The problem is
-    // that we are passed the final value, which already had the
-    // current location subtracted from it.
-    // We just check that Val fits in 17 bits. This misses some cases, but
-    // should have no false positives.
-    checkInt<17>(Loc, Val, Type);
-    write16le(Loc, Val);
-    break;
-  default:
-    checkInt<32>(Loc, Val, Type);
-    write32le(Loc, Val);
-  }
-}
-
-void X86TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
-                                   uint64_t Val) const {
-  // Convert
-  //   leal x@tlsgd(, %ebx, 1),
-  //   call __tls_get_addr@plt
-  // to
-  //   movl %gs:0,%eax
-  //   subl $x@ntpoff,%eax
-  const uint8_t Inst[] = {
-      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
-      0x81, 0xe8, 0x00, 0x00, 0x00, 0x00  // subl 0(%ebx), %eax
-  };
-  memcpy(Loc - 3, Inst, sizeof(Inst));
-  write32le(Loc + 5, Val);
-}
-
-void X86TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
-                                   uint64_t Val) const {
-  // Convert
-  //   leal x@tlsgd(, %ebx, 1),
-  //   call __tls_get_addr@plt
-  // to
-  //   movl %gs:0, %eax
-  //   addl x@gotntpoff(%ebx), %eax
-  const uint8_t Inst[] = {
-      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
-      0x03, 0x83, 0x00, 0x00, 0x00, 0x00  // addl 0(%ebx), %eax
-  };
-  memcpy(Loc - 3, Inst, sizeof(Inst));
-  write32le(Loc + 5, Val);
-}
-
-// In some conditions, relocations can be optimized to avoid using GOT.
-// This function does that for Initial Exec to Local Exec case.
-void X86TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
-                                   uint64_t Val) const {
-  // Ulrich's document section 6.2 says that @gotntpoff can
-  // be used with MOVL or ADDL instructions.
-  // @indntpoff is similar to @gotntpoff, but for use in
-  // position dependent code.
-  uint8_t Reg = (Loc[-1] >> 3) & 7;
-
-  if (Type == R_386_TLS_IE) {
-    if (Loc[-1] == 0xa1) {
-      // "movl foo at indntpoff,%eax" -> "movl $foo,%eax"
-      // This case is different from the generic case below because
-      // this is a 5 byte instruction while below is 6 bytes.
-      Loc[-1] = 0xb8;
-    } else if (Loc[-2] == 0x8b) {
-      // "movl foo at indntpoff,%reg" -> "movl $foo,%reg"
-      Loc[-2] = 0xc7;
-      Loc[-1] = 0xc0 | Reg;
-    } else {
-      // "addl foo at indntpoff,%reg" -> "addl $foo,%reg"
-      Loc[-2] = 0x81;
-      Loc[-1] = 0xc0 | Reg;
-    }
-  } else {
-    assert(Type == R_386_TLS_GOTIE);
-    if (Loc[-2] == 0x8b) {
-      // "movl foo at gottpoff(%rip),%reg" -> "movl $foo,%reg"
-      Loc[-2] = 0xc7;
-      Loc[-1] = 0xc0 | Reg;
-    } else {
-      // "addl foo at gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
-      Loc[-2] = 0x8d;
-      Loc[-1] = 0x80 | (Reg << 3) | Reg;
-    }
-  }
-  write32le(Loc, Val);
-}
-
-void X86TargetInfo::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
-                                   uint64_t Val) const {
-  if (Type == R_386_TLS_LDO_32) {
-    write32le(Loc, Val);
-    return;
-  }
-
-  // Convert
-  //   leal foo(%reg),%eax
-  //   call ___tls_get_addr
-  // to
-  //   movl %gs:0,%eax
-  //   nop
-  //   leal 0(%esi,1),%esi
-  const uint8_t Inst[] = {
-      0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
-      0x90,                               // nop
-      0x8d, 0x74, 0x26, 0x00              // leal 0(%esi,1),%esi
-  };
-  memcpy(Loc - 2, Inst, sizeof(Inst));
-}
-
-template <class ELFT> X86_64TargetInfo<ELFT>::X86_64TargetInfo() {
-  CopyRel = R_X86_64_COPY;
-  GotRel = R_X86_64_GLOB_DAT;
-  PltRel = R_X86_64_JUMP_SLOT;
-  RelativeRel = R_X86_64_RELATIVE;
-  IRelativeRel = R_X86_64_IRELATIVE;
-  TlsGotRel = R_X86_64_TPOFF64;
-  TlsModuleIndexRel = R_X86_64_DTPMOD64;
-  TlsOffsetRel = R_X86_64_DTPOFF64;
-  GotEntrySize = 8;
-  GotPltEntrySize = 8;
-  PltEntrySize = 16;
-  PltHeaderSize = 16;
-  TlsGdRelaxSkip = 2;
-  // Align to the large page size (known as a superpage or huge page).
-  // FreeBSD automatically promotes large, superpage-aligned allocations.
-  DefaultImageBase = 0x200000;
-  // 0xCC is the "int3" (call debug exception handler) instruction.
-  TrapInstr = 0xcccccccc;
-}
-
-template <class ELFT>
-RelExpr X86_64TargetInfo<ELFT>::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                           const uint8_t *Loc) const {
-  switch (Type) {
-  case R_X86_64_8:
-  case R_X86_64_16:
-  case R_X86_64_32:
-  case R_X86_64_32S:
-  case R_X86_64_64:
-  case R_X86_64_DTPOFF32:
-  case R_X86_64_DTPOFF64:
-    return R_ABS;
-  case R_X86_64_TPOFF32:
-    return R_TLS;
-  case R_X86_64_TLSLD:
-    return R_TLSLD_PC;
-  case R_X86_64_TLSGD:
-    return R_TLSGD_PC;
-  case R_X86_64_SIZE32:
-  case R_X86_64_SIZE64:
-    return R_SIZE;
-  case R_X86_64_PLT32:
-    return R_PLT_PC;
-  case R_X86_64_PC32:
-  case R_X86_64_PC64:
-    return R_PC;
-  case R_X86_64_GOT32:
-  case R_X86_64_GOT64:
-    return R_GOT_FROM_END;
-  case R_X86_64_GOTPCREL:
-  case R_X86_64_GOTPCRELX:
-  case R_X86_64_REX_GOTPCRELX:
-  case R_X86_64_GOTTPOFF:
-    return R_GOT_PC;
-  case R_X86_64_NONE:
-    return R_NONE;
-  default:
-    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
-    return R_HINT;
-  }
-}
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::writeGotPltHeader(uint8_t *Buf) const {
-  // The first entry holds the value of _DYNAMIC. It is not clear why that is
-  // required, but it is documented in the psabi and the glibc dynamic linker
-  // seems to use it (note that this is relevant for linking ld.so, not any
-  // other program).
-  write64le(Buf, InX::Dynamic->getVA());
-}
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::writeGotPlt(uint8_t *Buf,
-                                         const SymbolBody &S) const {
-  // See comments in X86TargetInfo::writeGotPlt.
-  write32le(Buf, S.getPltVA() + 6);
-}
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
-  const uint8_t PltData[] = {
-      0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // pushq GOTPLT+8(%rip)
-      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *GOTPLT+16(%rip)
-      0x0f, 0x1f, 0x40, 0x00              // nop
-  };
-  memcpy(Buf, PltData, sizeof(PltData));
-  uint64_t GotPlt = InX::GotPlt->getVA();
-  uint64_t Plt = InX::Plt->getVA();
-  write32le(Buf + 2, GotPlt - Plt + 2); // GOTPLT+8
-  write32le(Buf + 8, GotPlt - Plt + 4); // GOTPLT+16
-}
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
-                                      uint64_t PltEntryAddr, int32_t Index,
-                                      unsigned RelOff) const {
-  const uint8_t Inst[] = {
-      0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmpq *got(%rip)
-      0x68, 0x00, 0x00, 0x00, 0x00,       // pushq <relocation index>
-      0xe9, 0x00, 0x00, 0x00, 0x00        // jmpq plt[0]
-  };
-  memcpy(Buf, Inst, sizeof(Inst));
-
-  write32le(Buf + 2, GotPltEntryAddr - PltEntryAddr - 6);
-  write32le(Buf + 7, Index);
-  write32le(Buf + 12, -Index * PltEntrySize - PltHeaderSize - 16);
-}
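
For illustration, the rel32 written at offset 12 has to send the final jmpq back to the PLT header (plt[0]). A minimal standalone check of that arithmetic, using this target's 16-byte header and entries and a hypothetical base address:

  #include <cassert>
  #include <cstdint>

  int main() {
    const int64_t PltBase = 0x401020, PltHeaderSize = 16, PltEntrySize = 16;
    for (int64_t Index = 0; Index < 4; ++Index) {
      int64_t PltEntryAddr = PltBase + PltHeaderSize + Index * PltEntrySize;
      // rel32 is measured from the end of the 5-byte jmp at entry offset 11,
      // i.e. from PltEntryAddr + 16.
      int64_t Rel = -Index * PltEntrySize - PltHeaderSize - 16;
      assert(PltEntryAddr + 16 + Rel == PltBase); // always lands on plt[0]
    }
    return 0;
  }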
-
-template <class ELFT>
-bool X86_64TargetInfo<ELFT>::isPicRel(uint32_t Type) const {
-  return Type != R_X86_64_PC32 && Type != R_X86_64_32 &&
-         Type != R_X86_64_TPOFF32;
-}
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
-                                            uint64_t Val) const {
-  // Convert
-  //   .byte 0x66
-  //   leaq x at tlsgd(%rip), %rdi
-  //   .word 0x6666
-  //   rex64
-  //   call __tls_get_addr at plt
-  // to
-  //   mov %fs:0x0,%rax
-  //   lea x at tpoff,%rax
-  const uint8_t Inst[] = {
-      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
-      0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00              // lea x at tpoff,%rax
-  };
-  memcpy(Loc - 4, Inst, sizeof(Inst));
-
-  // The original code used a PC-relative relocation, so we have to
-  // compensate for the -4 it had in the addend.
-  write32le(Loc + 8, Val + 4);
-}
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
-                                            uint64_t Val) const {
-  // Convert
-  //   .byte 0x66
-  //   leaq x at tlsgd(%rip), %rdi
-  //   .word 0x6666
-  //   rex64
-  //   call __tls_get_addr at plt
-  // to
-  //   mov %fs:0x0,%rax
-  //   addq x at tpoff,%rax
-  const uint8_t Inst[] = {
-      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
-      0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00              // addq x at tpoff,%rax
-  };
-  memcpy(Loc - 4, Inst, sizeof(Inst));
-
-  // Both code sequences are PC relative, but since we are moving the constant
-  // forward by 8 bytes we have to subtract 8 from the value.
-  write32le(Loc + 8, Val - 8);
-}
-
-// Under some conditions, the R_X86_64_GOTTPOFF relocation can be optimized to
-// R_X86_64_TPOFF32 so that it does not use the GOT.
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
-                                            uint64_t Val) const {
-  uint8_t *Inst = Loc - 3;
-  uint8_t Reg = Loc[-1] >> 3;
-  uint8_t *RegSlot = Loc - 1;
-
-  // Note that ADD with RSP or R12 is converted to ADD instead of LEA
-  // because LEA with these registers needs 4 bytes to encode and thus
-  // wouldn't fit the space.
-
-  if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
-    // "addq foo at gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
-    memcpy(Inst, "\x48\x81\xc4", 3);
-  } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
-    // "addq foo at gottpoff(%rip),%r12" -> "addq $foo,%r12"
-    memcpy(Inst, "\x49\x81\xc4", 3);
-  } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
-    // "addq foo at gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
-    memcpy(Inst, "\x4d\x8d", 2);
-    *RegSlot = 0x80 | (Reg << 3) | Reg;
-  } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
-    // "addq foo at gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
-    memcpy(Inst, "\x48\x8d", 2);
-    *RegSlot = 0x80 | (Reg << 3) | Reg;
-  } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
-    // "movq foo at gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
-    memcpy(Inst, "\x49\xc7", 2);
-    *RegSlot = 0xc0 | Reg;
-  } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
-    // "movq foo at gottpoff(%rip),%reg" -> "movq $foo,%reg"
-    memcpy(Inst, "\x48\xc7", 2);
-    *RegSlot = 0xc0 | Reg;
-  } else {
-    error(getErrorLocation(Loc - 3) +
-          "R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
-  }
-
-  // The original code used a PC relative relocation.
-  // Need to compensate for the -4 it had in the addend.
-  write32le(Loc, Val + 4);
-}
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::relaxTlsLdToLe(uint8_t *Loc, uint32_t Type,
-                                            uint64_t Val) const {
-  // Convert
-  //   leaq bar at tlsld(%rip), %rdi
-  //   callq __tls_get_addr at PLT
-  //   leaq bar at dtpoff(%rax), %rcx
-  // to
-  //   .word 0x6666
-  //   .byte 0x66
-  //   mov %fs:0,%rax
-  //   leaq bar at tpoff(%rax), %rcx
-  if (Type == R_X86_64_DTPOFF64) {
-    write64le(Loc, Val);
-    return;
-  }
-  if (Type == R_X86_64_DTPOFF32) {
-    write32le(Loc, Val);
-    return;
-  }
-
-  const uint8_t Inst[] = {
-      0x66, 0x66,                                          // .word 0x6666
-      0x66,                                                // .byte 0x66
-      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
-  };
-  memcpy(Loc - 3, Inst, sizeof(Inst));
-}
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
-                                         uint64_t Val) const {
-  switch (Type) {
-  case R_X86_64_8:
-    checkUInt<8>(Loc, Val, Type);
-    *Loc = Val;
-    break;
-  case R_X86_64_16:
-    checkUInt<16>(Loc, Val, Type);
-    write16le(Loc, Val);
-    break;
-  case R_X86_64_32:
-    checkUInt<32>(Loc, Val, Type);
-    write32le(Loc, Val);
-    break;
-  case R_X86_64_32S:
-  case R_X86_64_TPOFF32:
-  case R_X86_64_GOT32:
-  case R_X86_64_GOTPCREL:
-  case R_X86_64_GOTPCRELX:
-  case R_X86_64_REX_GOTPCRELX:
-  case R_X86_64_PC32:
-  case R_X86_64_GOTTPOFF:
-  case R_X86_64_PLT32:
-  case R_X86_64_TLSGD:
-  case R_X86_64_TLSLD:
-  case R_X86_64_DTPOFF32:
-  case R_X86_64_SIZE32:
-    checkInt<32>(Loc, Val, Type);
-    write32le(Loc, Val);
-    break;
-  case R_X86_64_64:
-  case R_X86_64_DTPOFF64:
-  case R_X86_64_GLOB_DAT:
-  case R_X86_64_PC64:
-  case R_X86_64_SIZE64:
-  case R_X86_64_GOT64:
-    write64le(Loc, Val);
-    break;
-  default:
-    llvm_unreachable("unexpected relocation");
-  }
-}
-
-template <class ELFT>
-RelExpr X86_64TargetInfo<ELFT>::adjustRelaxExpr(uint32_t Type,
-                                                const uint8_t *Data,
-                                                RelExpr RelExpr) const {
-  if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
-    return RelExpr;
-  const uint8_t Op = Data[-2];
-  const uint8_t ModRm = Data[-1];
-
-  // FIXME: When PIC is disabled and foo is defined locally in the
-  // lower 32-bit address space, the memory operand in mov can be converted
-  // into an immediate operand. Otherwise, mov must be changed to lea. We
-  // support only the latter relaxation at this moment.
-  if (Op == 0x8b)
-    return R_RELAX_GOT_PC;
-
-  // Relax call and jmp.
-  if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
-    return R_RELAX_GOT_PC;
-
-  // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
-  // If PIC is enabled, no relaxation is available.
-  // We also don't relax test/binop instructions without a REX prefix;
-  // they are 32-bit operations and not common.
-  assert(Type == R_X86_64_REX_GOTPCRELX);
-  return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
-}
-
-// A subset of relaxations can only be applied for no-PIC. This method
-// handles such relaxations. Instruction encoding information was taken from:
-// "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
-// (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
-//    64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val,
-                                           uint8_t Op, uint8_t ModRm) const {
-  const uint8_t Rex = Loc[-3];
-  // Convert "test %reg, foo at GOTPCREL(%rip)" to "test $foo, %reg".
-  if (Op == 0x85) {
-    // See "TEST-Logical Compare" (4-428 Vol. 2B),
-    // TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension).
-
-    // The ModR/M byte has the form XX YYY ZZZ, where
-    // YYY is MODRM.reg (register 2) and ZZZ is MODRM.rm (register 1).
-    // XX has different meanings:
-    // 00: The operand's memory address is in reg1.
-    // 01: The operand's memory address is reg1 + a byte-sized displacement.
-    // 10: The operand's memory address is reg1 + a four-byte displacement.
-    // 11: The operand is reg1 itself.
-    // If an instruction requires only one operand, the unused reg2 field
-    // holds extra opcode bits rather than a register code.
-    // 0xC0 == 11 000 000 binary.
-    // 0x38 == 00 111 000 binary.
-    // We transfer reg2 to reg1 here as the operand.
-    // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
-    Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.
-
-    // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
-    // See "TEST-Logical Compare" (4-428 Vol. 2B).
-    Loc[-2] = 0xf7;
-
-    // Move R bit to the B bit in REX byte.
-    // REX byte is encoded as 0100WRXB, where
-    // 0100 is 4bit fixed pattern.
-    // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
-    //   default operand size is used (which is 32-bit for most but not all
-    //   instructions).
-    // REX.R This 1-bit value is an extension to the MODRM.reg field.
-    // REX.X This 1-bit value is an extension to the SIB.index field.
-    // REX.B This 1-bit value is an extension to the MODRM.rm field or the
-    // SIB.base field.
-    // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
-    Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
-    write32le(Loc, Val);
-    return;
-  }
-
-  // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub
-  // or xor operations.
-
-  // Convert "binop foo at GOTPCREL(%rip), %reg" to "binop $foo, %reg".
-  // The logic is close to the one for the test instruction above, but we also
-  // write an opcode extension here; see below for details.
-  Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.
-
-  // The primary opcode is 0x81, and the opcode extension is one of:
-  // 000b = ADD, 001b = OR, 010b = ADC, 011b = SBB,
-  // 100b = AND, 101b = SUB, 110b = XOR, 111b = CMP.
-  // This value was written to MODRM.reg in the line above.
-  // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
-  // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
-  // descriptions about each operation.
-  Loc[-2] = 0x81;
-  Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
-  write32le(Loc, Val);
-}
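
For illustration, a self-contained sketch of the TEST case above applied to raw bytes: "test %rax, foo@GOTPCREL(%rip)" is encoded as 48 85 05 <rel32> and is rewritten to "test $foo, %rax", encoded as 48 f7 c0 <imm32>. The buffer and value below are hypothetical:

  #include <cassert>
  #include <cstdint>

  int main() {
    // test %rax, foo@GOTPCREL(%rip): REX.W=48, opcode 85, ModRM 05 (RIP-rel), rel32.
    uint8_t Buf[] = {0x48, 0x85, 0x05, 0x00, 0x00, 0x00, 0x00};
    uint8_t *Loc = Buf + 3; // points at the 32-bit displacement/immediate
    uint8_t Rex = Loc[-3], ModRm = Loc[-1];

    Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3;      // mod=11, rm = old reg field
    Loc[-2] = 0xf7;                            // TEST r/m64, imm32
    Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2; // move REX.R into REX.B
    uint32_t Val = 0x1000;                     // hypothetical value of foo
    for (int I = 0; I < 4; ++I)
      Loc[I] = (Val >> (8 * I)) & 0xff;        // little-endian imm32

    assert(Buf[0] == 0x48 && Buf[1] == 0xf7 && Buf[2] == 0xc0);
    return 0;
  }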
-
-template <class ELFT>
-void X86_64TargetInfo<ELFT>::relaxGot(uint8_t *Loc, uint64_t Val) const {
-  const uint8_t Op = Loc[-2];
-  const uint8_t ModRm = Loc[-1];
-
-  // Convert "mov foo at GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
-  if (Op == 0x8b) {
-    Loc[-2] = 0x8d;
-    write32le(Loc, Val);
-    return;
-  }
-
-  if (Op != 0xff) {
-    // We are relaxing a RIP-relative reference to an absolute one, so
-    // compensate for the old -4 addend.
-    assert(!Config->Pic);
-    relaxGotNoPic(Loc, Val + 4, Op, ModRm);
-    return;
-  }
-
-  // Convert call/jmp instructions.
-  if (ModRm == 0x15) {
-    // ABI says we can convert "call *foo at GOTPCREL(%rip)" to "nop; call foo".
-    // Instead we convert to "addr32 call foo", where addr32 is an instruction
-    // prefix. That makes the resulting expression a single instruction.
-    Loc[-2] = 0x67; // addr32 prefix
-    Loc[-1] = 0xe8; // call
-    write32le(Loc, Val);
-    return;
-  }
-
-  // Convert "jmp *foo at GOTPCREL(%rip)" to "jmp foo; nop".
-  // jmp doesn't return, so it is fine to use a nop here; it is just a filler.
-  assert(ModRm == 0x25);
-  Loc[-2] = 0xe9; // jmp
-  Loc[3] = 0x90;  // nop
-  write32le(Loc - 1, Val + 1);
-}
-
-// Relocation masks following the #lo(value), #hi(value), #ha(value),
-// #higher(value), #highera(value), #highest(value), and #highesta(value)
-// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
-// document.
-static uint16_t applyPPCLo(uint64_t V) { return V; }
-static uint16_t applyPPCHi(uint64_t V) { return V >> 16; }
-static uint16_t applyPPCHa(uint64_t V) { return (V + 0x8000) >> 16; }
-static uint16_t applyPPCHigher(uint64_t V) { return V >> 32; }
-static uint16_t applyPPCHighera(uint64_t V) { return (V + 0x8000) >> 32; }
-static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; }
-static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; }
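
For illustration, the @ha form rounds the upper half up whenever the low 16 bits are negative as a signed value, so that (#ha(V) << 16) plus the sign-extended #lo(V) reconstructs a 32-bit V. A small standalone check with a hypothetical value:

  #include <cassert>
  #include <cstdint>

  static uint16_t lo(uint64_t V) { return V; }
  static uint16_t ha(uint64_t V) { return (V + 0x8000) >> 16; }

  int main() {
    uint64_t V = 0x12348000; // low half is negative as int16_t, so ha rounds up
    int64_t Rebuilt = ((int64_t)ha(V) << 16) + (int16_t)lo(V);
    assert(Rebuilt == (int64_t)V); // 0x1235 << 16, minus 0x8000
    return 0;
  }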
-
-PPCTargetInfo::PPCTargetInfo() {}
-
-void PPCTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
-                                uint64_t Val) const {
-  switch (Type) {
-  case R_PPC_ADDR16_HA:
-    write16be(Loc, applyPPCHa(Val));
-    break;
-  case R_PPC_ADDR16_LO:
-    write16be(Loc, applyPPCLo(Val));
-    break;
-  case R_PPC_ADDR32:
-  case R_PPC_REL32:
-    write32be(Loc, Val);
-    break;
-  case R_PPC_REL24:
-    or32be(Loc, Val & 0x3FFFFFC);
-    break;
-  default:
-    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
-  }
-}
-
-RelExpr PPCTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                  const uint8_t *Loc) const {
-  switch (Type) {
-  case R_PPC_REL24:
-  case R_PPC_REL32:
-    return R_PC;
-  default:
-    return R_ABS;
-  }
-}
-
-PPC64TargetInfo::PPC64TargetInfo() {
-  PltRel = GotRel = R_PPC64_GLOB_DAT;
-  RelativeRel = R_PPC64_RELATIVE;
-  GotEntrySize = 8;
-  GotPltEntrySize = 8;
-  PltEntrySize = 32;
-  PltHeaderSize = 0;
-
-  // We need 64K pages (at least under glibc/Linux, the loader won't
-  // set different permissions on a finer granularity than that).
-  DefaultMaxPageSize = 65536;
-
-  // The PPC64 ELF ABI v1 spec says:
-  //
-  //   It is normally desirable to put segments with different characteristics
-  //   in separate 256 Mbyte portions of the address space, to give the
-  //   operating system full paging flexibility in the 64-bit address space.
-  //
-  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
-  // use 0x10000000 as the starting address.
-  DefaultImageBase = 0x10000000;
-}
-
-static uint64_t PPC64TocOffset = 0x8000;
-
-uint64_t getPPC64TocBase() {
-  // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The
-  // TOC starts where the first of these sections starts. We always create a
-  // .got when we see a relocation that uses it, so for us the start is always
-  // the .got.
-  uint64_t TocVA = InX::Got->getVA();
-
-  // Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
-  // thus permitting a full 64-Kbyte segment. Note that the glibc startup
-  // code (crt1.o) assumes that you can get from the TOC base to the
-  // start of the .toc section with only a single (signed) 16-bit relocation.
-  return TocVA + PPC64TocOffset;
-}
-
-RelExpr PPC64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                    const uint8_t *Loc) const {
-  switch (Type) {
-  default:
-    return R_ABS;
-  case R_PPC64_TOC16:
-  case R_PPC64_TOC16_DS:
-  case R_PPC64_TOC16_HA:
-  case R_PPC64_TOC16_HI:
-  case R_PPC64_TOC16_LO:
-  case R_PPC64_TOC16_LO_DS:
-    return R_GOTREL;
-  case R_PPC64_TOC:
-    return R_PPC_TOC;
-  case R_PPC64_REL24:
-    return R_PPC_PLT_OPD;
-  }
-}
-
-void PPC64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
-                               uint64_t PltEntryAddr, int32_t Index,
-                               unsigned RelOff) const {
-  uint64_t Off = GotPltEntryAddr - getPPC64TocBase();
-
-  // FIXME: What we should do, in theory, is get the offset of the function
-  // descriptor in the .opd section, and use that as the offset from %r2 (the
-  // TOC-base pointer). Instead, we have the GOT-entry offset, and that will
-  // be a pointer to the function descriptor in the .opd section. Using
-  // this scheme is simpler, but requires an extra indirection per PLT dispatch.
-
-  write32be(Buf, 0xf8410028);                       // std %r2, 40(%r1)
-  write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X at ha
-  write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X at l(%r11)
-  write32be(Buf + 12, 0xe96c0000);                  // ld %r11,0(%r12)
-  write32be(Buf + 16, 0x7d6903a6);                  // mtctr %r11
-  write32be(Buf + 20, 0xe84c0008);                  // ld %r2,8(%r12)
-  write32be(Buf + 24, 0xe96c0010);                  // ld %r11,16(%r12)
-  write32be(Buf + 28, 0x4e800420);                  // bctr
-}
-
-static std::pair<uint32_t, uint64_t> toAddr16Rel(uint32_t Type, uint64_t Val) {
-  uint64_t V = Val - PPC64TocOffset;
-  switch (Type) {
-  case R_PPC64_TOC16:
-    return {R_PPC64_ADDR16, V};
-  case R_PPC64_TOC16_DS:
-    return {R_PPC64_ADDR16_DS, V};
-  case R_PPC64_TOC16_HA:
-    return {R_PPC64_ADDR16_HA, V};
-  case R_PPC64_TOC16_HI:
-    return {R_PPC64_ADDR16_HI, V};
-  case R_PPC64_TOC16_LO:
-    return {R_PPC64_ADDR16_LO, V};
-  case R_PPC64_TOC16_LO_DS:
-    return {R_PPC64_ADDR16_LO_DS, V};
-  default:
-    return {Type, Val};
-  }
-}
-
-void PPC64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
-                                  uint64_t Val) const {
-  // For a TOC-relative relocation, proceed in terms of the corresponding
-  // ADDR16 relocation type.
-  std::tie(Type, Val) = toAddr16Rel(Type, Val);
-
-  switch (Type) {
-  case R_PPC64_ADDR14: {
-    checkAlignment<4>(Loc, Val, Type);
-    // Preserve the AA/LK bits in the branch instruction
-    uint8_t AALK = Loc[3];
-    write16be(Loc + 2, (AALK & 3) | (Val & 0xfffc));
-    break;
-  }
-  case R_PPC64_ADDR16:
-    checkInt<16>(Loc, Val, Type);
-    write16be(Loc, Val);
-    break;
-  case R_PPC64_ADDR16_DS:
-    checkInt<16>(Loc, Val, Type);
-    write16be(Loc, (read16be(Loc) & 3) | (Val & ~3));
-    break;
-  case R_PPC64_ADDR16_HA:
-  case R_PPC64_REL16_HA:
-    write16be(Loc, applyPPCHa(Val));
-    break;
-  case R_PPC64_ADDR16_HI:
-  case R_PPC64_REL16_HI:
-    write16be(Loc, applyPPCHi(Val));
-    break;
-  case R_PPC64_ADDR16_HIGHER:
-    write16be(Loc, applyPPCHigher(Val));
-    break;
-  case R_PPC64_ADDR16_HIGHERA:
-    write16be(Loc, applyPPCHighera(Val));
-    break;
-  case R_PPC64_ADDR16_HIGHEST:
-    write16be(Loc, applyPPCHighest(Val));
-    break;
-  case R_PPC64_ADDR16_HIGHESTA:
-    write16be(Loc, applyPPCHighesta(Val));
-    break;
-  case R_PPC64_ADDR16_LO:
-    write16be(Loc, applyPPCLo(Val));
-    break;
-  case R_PPC64_ADDR16_LO_DS:
-  case R_PPC64_REL16_LO:
-    write16be(Loc, (read16be(Loc) & 3) | (applyPPCLo(Val) & ~3));
-    break;
-  case R_PPC64_ADDR32:
-  case R_PPC64_REL32:
-    checkInt<32>(Loc, Val, Type);
-    write32be(Loc, Val);
-    break;
-  case R_PPC64_ADDR64:
-  case R_PPC64_REL64:
-  case R_PPC64_TOC:
-    write64be(Loc, Val);
-    break;
-  case R_PPC64_REL24: {
-    uint32_t Mask = 0x03FFFFFC;
-    checkInt<24>(Loc, Val, Type);
-    write32be(Loc, (read32be(Loc) & ~Mask) | (Val & Mask));
-    break;
-  }
-  default:
-    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
-  }
-}
-
-AArch64TargetInfo::AArch64TargetInfo() {
-  CopyRel = R_AARCH64_COPY;
-  RelativeRel = R_AARCH64_RELATIVE;
-  IRelativeRel = R_AARCH64_IRELATIVE;
-  GotRel = R_AARCH64_GLOB_DAT;
-  PltRel = R_AARCH64_JUMP_SLOT;
-  TlsDescRel = R_AARCH64_TLSDESC;
-  TlsGotRel = R_AARCH64_TLS_TPREL64;
-  GotEntrySize = 8;
-  GotPltEntrySize = 8;
-  PltEntrySize = 16;
-  PltHeaderSize = 32;
-  DefaultMaxPageSize = 65536;
-
-  // It doesn't seem to be documented anywhere, but TLS on AArch64 uses variant
-  // 1 of the TLS data structures and the TCB size is 16.
-  TcbSize = 16;
-}
-
-RelExpr AArch64TargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                      const uint8_t *Loc) const {
-  switch (Type) {
-  default:
-    return R_ABS;
-  case R_AARCH64_TLSDESC_ADR_PAGE21:
-    return R_TLSDESC_PAGE;
-  case R_AARCH64_TLSDESC_LD64_LO12:
-  case R_AARCH64_TLSDESC_ADD_LO12:
-    return R_TLSDESC;
-  case R_AARCH64_TLSDESC_CALL:
-    return R_TLSDESC_CALL;
-  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
-  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
-    return R_TLS;
-  case R_AARCH64_CALL26:
-  case R_AARCH64_CONDBR19:
-  case R_AARCH64_JUMP26:
-  case R_AARCH64_TSTBR14:
-    return R_PLT_PC;
-  case R_AARCH64_PREL16:
-  case R_AARCH64_PREL32:
-  case R_AARCH64_PREL64:
-  case R_AARCH64_ADR_PREL_LO21:
-    return R_PC;
-  case R_AARCH64_ADR_PREL_PG_HI21:
-    return R_PAGE_PC;
-  case R_AARCH64_LD64_GOT_LO12_NC:
-  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
-    return R_GOT;
-  case R_AARCH64_ADR_GOT_PAGE:
-  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
-    return R_GOT_PAGE_PC;
-  case R_AARCH64_NONE:
-    return R_NONE;
-  }
-}
-
-RelExpr AArch64TargetInfo::adjustRelaxExpr(uint32_t Type, const uint8_t *Data,
-                                           RelExpr Expr) const {
-  if (Expr == R_RELAX_TLS_GD_TO_IE) {
-    if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
-      return R_RELAX_TLS_GD_TO_IE_PAGE_PC;
-    return R_RELAX_TLS_GD_TO_IE_ABS;
-  }
-  return Expr;
-}
-
-bool AArch64TargetInfo::usesOnlyLowPageBits(uint32_t Type) const {
-  switch (Type) {
-  default:
-    return false;
-  case R_AARCH64_ADD_ABS_LO12_NC:
-  case R_AARCH64_LD64_GOT_LO12_NC:
-  case R_AARCH64_LDST128_ABS_LO12_NC:
-  case R_AARCH64_LDST16_ABS_LO12_NC:
-  case R_AARCH64_LDST32_ABS_LO12_NC:
-  case R_AARCH64_LDST64_ABS_LO12_NC:
-  case R_AARCH64_LDST8_ABS_LO12_NC:
-  case R_AARCH64_TLSDESC_ADD_LO12:
-  case R_AARCH64_TLSDESC_LD64_LO12:
-  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
-    return true;
-  }
-}
-
-bool AArch64TargetInfo::isPicRel(uint32_t Type) const {
-  return Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64;
-}
-
-void AArch64TargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
-  write64le(Buf, InX::Plt->getVA());
-}
-
-// Page(Expr) is the page address of the expression Expr, defined
-// as (Expr & ~0xFFF). (This applies even if the machine page size
-// supported by the platform has a different value.)
-uint64_t getAArch64Page(uint64_t Expr) {
-  return Expr & (~static_cast<uint64_t>(0xFFF));
-}
-
-void AArch64TargetInfo::writePltHeader(uint8_t *Buf) const {
-  const uint8_t PltData[] = {
-      0xf0, 0x7b, 0xbf, 0xa9, // stp	x16, x30, [sp,#-16]!
-      0x10, 0x00, 0x00, 0x90, // adrp	x16, Page(&(.plt.got[2]))
-      0x11, 0x02, 0x40, 0xf9, // ldr	x17, [x16, Offset(&(.plt.got[2]))]
-      0x10, 0x02, 0x00, 0x91, // add	x16, x16, Offset(&(.plt.got[2]))
-      0x20, 0x02, 0x1f, 0xd6, // br	x17
-      0x1f, 0x20, 0x03, 0xd5, // nop
-      0x1f, 0x20, 0x03, 0xd5, // nop
-      0x1f, 0x20, 0x03, 0xd5  // nop
-  };
-  memcpy(Buf, PltData, sizeof(PltData));
-
-  uint64_t Got = InX::GotPlt->getVA();
-  uint64_t Plt = InX::Plt->getVA();
-  relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
-              getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
-  relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
-  relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
-}
-
-void AArch64TargetInfo::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
-                                 uint64_t PltEntryAddr, int32_t Index,
-                                 unsigned RelOff) const {
-  const uint8_t Inst[] = {
-      0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
-      0x11, 0x02, 0x40, 0xf9, // ldr  x17, [x16, Offset(&(.plt.got[n]))]
-      0x10, 0x02, 0x00, 0x91, // add  x16, x16, Offset(&(.plt.got[n]))
-      0x20, 0x02, 0x1f, 0xd6  // br   x17
-  };
-  memcpy(Buf, Inst, sizeof(Inst));
-
-  relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
-              getAArch64Page(GotPltEntryAddr) - getAArch64Page(PltEntryAddr));
-  relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotPltEntryAddr);
-  relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotPltEntryAddr);
-}
-
-static void write32AArch64Addr(uint8_t *L, uint64_t Imm) {
-  uint32_t ImmLo = (Imm & 0x3) << 29;
-  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
-  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
-  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
-}
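
For illustration, the ADR/ADRP immediate is split into a 2-bit immlo field at bits [30:29] and a 19-bit immhi field at bits [23:5]; the helper above merges both into the instruction word. A minimal round-trip check with a hypothetical 21-bit value:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t Imm = 0x12345;                     // hypothetical 21-bit page delta
    uint32_t ImmLo = (Imm & 0x3) << 29;         // bits [30:29]
    uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;     // bits [23:5]
    uint32_t Insn = 0x90000000 | ImmLo | ImmHi; // adrp x0, <page>
    // Decode it back the way a disassembler would.
    uint64_t Decoded = ((Insn >> 29) & 0x3) | (((Insn >> 5) & 0x7FFFF) << 2);
    assert(Decoded == Imm);
    return 0;
  }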
-
-// Return the bits [Start, End] from Val shifted Start bits.
-// For instance, getBits(0xF0, 4, 8) returns 0xF.
-static uint64_t getBits(uint64_t Val, int Start, int End) {
-  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
-  return (Val >> Start) & Mask;
-}
-
-// Update the immediate field in an AArch64 ldr, str, or add instruction.
-static void or32AArch64Imm(uint8_t *L, uint64_t Imm) {
-  or32le(L, (Imm & 0xFFF) << 10);
-}
-
-void AArch64TargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
-                                    uint64_t Val) const {
-  switch (Type) {
-  case R_AARCH64_ABS16:
-  case R_AARCH64_PREL16:
-    checkIntUInt<16>(Loc, Val, Type);
-    write16le(Loc, Val);
-    break;
-  case R_AARCH64_ABS32:
-  case R_AARCH64_PREL32:
-    checkIntUInt<32>(Loc, Val, Type);
-    write32le(Loc, Val);
-    break;
-  case R_AARCH64_ABS64:
-  case R_AARCH64_GLOB_DAT:
-  case R_AARCH64_PREL64:
-    write64le(Loc, Val);
-    break;
-  case R_AARCH64_ADD_ABS_LO12_NC:
-    or32AArch64Imm(Loc, Val);
-    break;
-  case R_AARCH64_ADR_GOT_PAGE:
-  case R_AARCH64_ADR_PREL_PG_HI21:
-  case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
-  case R_AARCH64_TLSDESC_ADR_PAGE21:
-    checkInt<33>(Loc, Val, Type);
-    write32AArch64Addr(Loc, Val >> 12);
-    break;
-  case R_AARCH64_ADR_PREL_LO21:
-    checkInt<21>(Loc, Val, Type);
-    write32AArch64Addr(Loc, Val);
-    break;
-  case R_AARCH64_CALL26:
-  case R_AARCH64_JUMP26:
-    checkInt<28>(Loc, Val, Type);
-    or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
-    break;
-  case R_AARCH64_CONDBR19:
-    checkInt<21>(Loc, Val, Type);
-    or32le(Loc, (Val & 0x1FFFFC) << 3);
-    break;
-  case R_AARCH64_LD64_GOT_LO12_NC:
-  case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
-  case R_AARCH64_TLSDESC_LD64_LO12:
-    checkAlignment<8>(Loc, Val, Type);
-    or32le(Loc, (Val & 0xFF8) << 7);
-    break;
-  case R_AARCH64_LDST8_ABS_LO12_NC:
-    or32AArch64Imm(Loc, getBits(Val, 0, 11));
-    break;
-  case R_AARCH64_LDST16_ABS_LO12_NC:
-    or32AArch64Imm(Loc, getBits(Val, 1, 11));
-    break;
-  case R_AARCH64_LDST32_ABS_LO12_NC:
-    or32AArch64Imm(Loc, getBits(Val, 2, 11));
-    break;
-  case R_AARCH64_LDST64_ABS_LO12_NC:
-    or32AArch64Imm(Loc, getBits(Val, 3, 11));
-    break;
-  case R_AARCH64_LDST128_ABS_LO12_NC:
-    or32AArch64Imm(Loc, getBits(Val, 4, 11));
-    break;
-  case R_AARCH64_MOVW_UABS_G0_NC:
-    or32le(Loc, (Val & 0xFFFF) << 5);
-    break;
-  case R_AARCH64_MOVW_UABS_G1_NC:
-    or32le(Loc, (Val & 0xFFFF0000) >> 11);
-    break;
-  case R_AARCH64_MOVW_UABS_G2_NC:
-    or32le(Loc, (Val & 0xFFFF00000000) >> 27);
-    break;
-  case R_AARCH64_MOVW_UABS_G3:
-    or32le(Loc, (Val & 0xFFFF000000000000) >> 43);
-    break;
-  case R_AARCH64_TSTBR14:
-    checkInt<16>(Loc, Val, Type);
-    or32le(Loc, (Val & 0xFFFC) << 3);
-    break;
-  case R_AARCH64_TLSLE_ADD_TPREL_HI12:
-    checkInt<24>(Loc, Val, Type);
-    or32AArch64Imm(Loc, Val >> 12);
-    break;
-  case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
-  case R_AARCH64_TLSDESC_ADD_LO12:
-    or32AArch64Imm(Loc, Val);
-    break;
-  default:
-    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
-  }
-}
-
-void AArch64TargetInfo::relaxTlsGdToLe(uint8_t *Loc, uint32_t Type,
-                                       uint64_t Val) const {
-  // TLSDESC Global-Dynamic relocations are of the form:
-  //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
-  //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12]
-  //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12]
-  //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
-  //   blr     x1
-  // And it can be optimized to:
-  //   movz    x0, #0x0, lsl #16
-  //   movk    x0, #0x10
-  //   nop
-  //   nop
-  checkUInt<32>(Loc, Val, Type);
-
-  switch (Type) {
-  case R_AARCH64_TLSDESC_ADD_LO12:
-  case R_AARCH64_TLSDESC_CALL:
-    write32le(Loc, 0xd503201f); // nop
-    return;
-  case R_AARCH64_TLSDESC_ADR_PAGE21:
-    write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
-    return;
-  case R_AARCH64_TLSDESC_LD64_LO12:
-    write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk
-    return;
-  default:
-    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
-  }
-}
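
For illustration, the movz materializes bits [31:16] of the TP-relative offset (shifted left by 16) and the movk fills in bits [15:0], so together they produce the full 32-bit value that checkUInt<32> guarantees fits. A small standalone check with a hypothetical offset:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t Val = 0x12345; // hypothetical TP-relative offset, must fit 32 bits
    uint32_t Movz = 0xd2a00000 | (((Val >> 16) & 0xffff) << 5); // movz x0, #hi, lsl #16
    uint32_t Movk = 0xf2800000 | ((Val & 0xffff) << 5);         // movk x0, #lo
    uint64_t Hi = (Movz >> 5) & 0xffff, Lo = (Movk >> 5) & 0xffff;
    assert(((Hi << 16) | Lo) == Val); // the register ends up holding Val
    return 0;
  }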
-
-void AArch64TargetInfo::relaxTlsGdToIe(uint8_t *Loc, uint32_t Type,
-                                       uint64_t Val) const {
-  // TLSDESC Global-Dynamic relocations are of the form:
-  //   adrp    x0, :tlsdesc:v              [R_AARCH64_TLSDESC_ADR_PAGE21]
-  //   ldr     x1, [x0, #:tlsdesc_lo12:v]  [R_AARCH64_TLSDESC_LD64_LO12]
-  //   add     x0, x0, :tlsdesc_lo12:v     [R_AARCH64_TLSDESC_ADD_LO12]
-  //   .tlsdesccall                        [R_AARCH64_TLSDESC_CALL]
-  //   blr     x1
-  // And it can be optimized to:
-  //   adrp    x0, :gottprel:v
-  //   ldr     x0, [x0, :gottprel_lo12:v]
-  //   nop
-  //   nop
-
-  switch (Type) {
-  case R_AARCH64_TLSDESC_ADD_LO12:
-  case R_AARCH64_TLSDESC_CALL:
-    write32le(Loc, 0xd503201f); // nop
-    break;
-  case R_AARCH64_TLSDESC_ADR_PAGE21:
-    write32le(Loc, 0x90000000); // adrp
-    relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val);
-    break;
-  case R_AARCH64_TLSDESC_LD64_LO12:
-    write32le(Loc, 0xf9400000); // ldr
-    relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val);
-    break;
-  default:
-    llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
-  }
-}
-
-void AArch64TargetInfo::relaxTlsIeToLe(uint8_t *Loc, uint32_t Type,
-                                       uint64_t Val) const {
-  checkUInt<32>(Loc, Val, Type);
-
-  if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
-    // Generate MOVZ.
-    uint32_t RegNo = read32le(Loc) & 0x1f;
-    write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5));
-    return;
-  }
-  if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
-    // Generate MOVK.
-    uint32_t RegNo = read32le(Loc) & 0x1f;
-    write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
-    return;
-  }
-  llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
-}
-
-AMDGPUTargetInfo::AMDGPUTargetInfo() {
-  RelativeRel = R_AMDGPU_REL64;
-  GotRel = R_AMDGPU_ABS64;
-  GotEntrySize = 8;
-}
-
-void AMDGPUTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
-                                   uint64_t Val) const {
-  switch (Type) {
-  case R_AMDGPU_ABS32:
-  case R_AMDGPU_GOTPCREL:
-  case R_AMDGPU_GOTPCREL32_LO:
-  case R_AMDGPU_REL32:
-  case R_AMDGPU_REL32_LO:
-    write32le(Loc, Val);
-    break;
-  case R_AMDGPU_ABS64:
-    write64le(Loc, Val);
-    break;
-  case R_AMDGPU_GOTPCREL32_HI:
-  case R_AMDGPU_REL32_HI:
-    write32le(Loc, Val >> 32);
-    break;
-  default:
-    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
-  }
-}
-
-RelExpr AMDGPUTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                     const uint8_t *Loc) const {
-  switch (Type) {
-  case R_AMDGPU_ABS32:
-  case R_AMDGPU_ABS64:
-    return R_ABS;
-  case R_AMDGPU_REL32:
-  case R_AMDGPU_REL32_LO:
-  case R_AMDGPU_REL32_HI:
-    return R_PC;
-  case R_AMDGPU_GOTPCREL:
-  case R_AMDGPU_GOTPCREL32_LO:
-  case R_AMDGPU_GOTPCREL32_HI:
-    return R_GOT_PC;
-  default:
-    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
-    return R_HINT;
-  }
-}
-
-ARMTargetInfo::ARMTargetInfo() {
-  CopyRel = R_ARM_COPY;
-  RelativeRel = R_ARM_RELATIVE;
-  IRelativeRel = R_ARM_IRELATIVE;
-  GotRel = R_ARM_GLOB_DAT;
-  PltRel = R_ARM_JUMP_SLOT;
-  TlsGotRel = R_ARM_TLS_TPOFF32;
-  TlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
-  TlsOffsetRel = R_ARM_TLS_DTPOFF32;
-  GotEntrySize = 4;
-  GotPltEntrySize = 4;
-  PltEntrySize = 16;
-  PltHeaderSize = 20;
-  // ARM uses Variant 1 TLS
-  TcbSize = 8;
-  NeedsThunks = true;
-}
-
-RelExpr ARMTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                  const uint8_t *Loc) const {
-  switch (Type) {
-  default:
-    return R_ABS;
-  case R_ARM_THM_JUMP11:
-    return R_PC;
-  case R_ARM_CALL:
-  case R_ARM_JUMP24:
-  case R_ARM_PC24:
-  case R_ARM_PLT32:
-  case R_ARM_PREL31:
-  case R_ARM_THM_JUMP19:
-  case R_ARM_THM_JUMP24:
-  case R_ARM_THM_CALL:
-    return R_PLT_PC;
-  case R_ARM_GOTOFF32:
-    // (S + A) - GOT_ORG
-    return R_GOTREL;
-  case R_ARM_GOT_BREL:
-    // GOT(S) + A - GOT_ORG
-    return R_GOT_OFF;
-  case R_ARM_GOT_PREL:
-  case R_ARM_TLS_IE32:
-    // GOT(S) + A - P
-    return R_GOT_PC;
-  case R_ARM_SBREL32:
-    return R_ARM_SBREL;
-  case R_ARM_TARGET1:
-    return Config->Target1Rel ? R_PC : R_ABS;
-  case R_ARM_TARGET2:
-    if (Config->Target2 == Target2Policy::Rel)
-      return R_PC;
-    if (Config->Target2 == Target2Policy::Abs)
-      return R_ABS;
-    return R_GOT_PC;
-  case R_ARM_TLS_GD32:
-    return R_TLSGD_PC;
-  case R_ARM_TLS_LDM32:
-    return R_TLSLD_PC;
-  case R_ARM_BASE_PREL:
-    // B(S) + A - P
-    // FIXME: currently B(S) is assumed to be .got; this may not hold for all
-    // platforms.
-    return R_GOTONLY_PC;
-  case R_ARM_MOVW_PREL_NC:
-  case R_ARM_MOVT_PREL:
-  case R_ARM_REL32:
-  case R_ARM_THM_MOVW_PREL_NC:
-  case R_ARM_THM_MOVT_PREL:
-    return R_PC;
-  case R_ARM_NONE:
-    return R_NONE;
-  case R_ARM_TLS_LE32:
-    return R_TLS;
-  }
-}
-
-bool ARMTargetInfo::isPicRel(uint32_t Type) const {
-  return (Type == R_ARM_TARGET1 && !Config->Target1Rel) ||
-         (Type == R_ARM_ABS32);
-}
-
-uint32_t ARMTargetInfo::getDynRel(uint32_t Type) const {
-  if (Type == R_ARM_TARGET1 && !Config->Target1Rel)
-    return R_ARM_ABS32;
-  if (Type == R_ARM_ABS32)
-    return Type;
-  // Keep it going with a dummy value so that we can find more reloc errors.
-  return R_ARM_ABS32;
-}
-
-void ARMTargetInfo::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
-  write32le(Buf, InX::Plt->getVA());
-}
-
-void ARMTargetInfo::writeIgotPlt(uint8_t *Buf, const SymbolBody &S) const {
-  // An ARM entry is the address of the ifunc resolver function.
-  write32le(Buf, S.getVA());
-}
-
-void ARMTargetInfo::writePltHeader(uint8_t *Buf) const {
-  const uint8_t PltData[] = {
-      0x04, 0xe0, 0x2d, 0xe5, //     str lr, [sp,#-4]!
-      0x04, 0xe0, 0x9f, 0xe5, //     ldr lr, L2
-      0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
-      0x08, 0xf0, 0xbe, 0xe5, //     ldr pc, [lr, #8]
-      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt) - L1 - 8
-  };
-  memcpy(Buf, PltData, sizeof(PltData));
-  uint64_t GotPlt = InX::GotPlt->getVA();
-  uint64_t L1 = InX::Plt->getVA() + 8;
-  write32le(Buf + 16, GotPlt - L1 - 8);
-}
-
-void ARMTargetInfo::addPltHeaderSymbols(InputSectionBase *ISD) const {
-  auto *IS = cast<InputSection>(ISD);
-  addSyntheticLocal("$a", STT_NOTYPE, 0, 0, IS);
-  addSyntheticLocal("$d", STT_NOTYPE, 16, 0, IS);
-}
-
-void ARMTargetInfo::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
-                             uint64_t PltEntryAddr, int32_t Index,
-                             unsigned RelOff) const {
-  // FIXME: Using simple code sequence with simple relocations.
-  // There is a more optimal sequence but it requires support for the group
-  // relocations. See ELF for the ARM Architecture Appendix A.3
-  const uint8_t PltData[] = {
-      0x04, 0xc0, 0x9f, 0xe5, //     ldr ip, L2
-      0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
-      0x00, 0xf0, 0x9c, 0xe5, //     ldr pc, [ip]
-      0x00, 0x00, 0x00, 0x00, // L2: .word   &(.got.plt entry) - L1 - 8
-  };
-  memcpy(Buf, PltData, sizeof(PltData));
-  uint64_t L1 = PltEntryAddr + 4;
-  write32le(Buf + 12, GotPltEntryAddr - L1 - 8);
-}
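
For illustration, the literal stored at L2 works because reading pc in the add at L1 yields the address of L1 plus 8 (the ARM pipeline offset), so ip ends up holding exactly the .got.plt entry address. A trivial standalone check with hypothetical addresses:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t PltEntryAddr = 0x10400, GotPltEntryAddr = 0x20000;
    uint64_t L1 = PltEntryAddr + 4;              // address of "add ip, ip, pc"
    uint64_t Literal = GotPltEntryAddr - L1 - 8; // the .word stored at L2
    // Reading pc inside the add yields L1 + 8 on ARM.
    assert(Literal + (L1 + 8) == GotPltEntryAddr); // ip == .got.plt entry
    return 0;
  }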
-
-void ARMTargetInfo::addPltSymbols(InputSectionBase *ISD, uint64_t Off) const {
-  auto *IS = cast<InputSection>(ISD);
-  addSyntheticLocal("$a", STT_NOTYPE, Off, 0, IS);
-  addSyntheticLocal("$d", STT_NOTYPE, Off + 12, 0, IS);
-}
-
-bool ARMTargetInfo::needsThunk(RelExpr Expr, uint32_t RelocType,
-                               const InputFile *File,
-                               const SymbolBody &S) const {
-  // If S is an undefined weak symbol in an executable, we don't need a thunk.
-  // In a DSO, calls to undefined symbols, including weak ones, get PLT entries
-  // which may need a thunk.
-  if (S.isUndefined() && !S.isLocal() && S.symbol()->isWeak() &&
-      !Config->Shared)
-    return false;
-  // A state change from ARM to Thumb and vice versa must go through an
-  // interworking thunk if the relocation type is not R_ARM_CALL or
-  // R_ARM_THM_CALL.
-  switch (RelocType) {
-  case R_ARM_PC24:
-  case R_ARM_PLT32:
-  case R_ARM_JUMP24:
-    // The source is ARM and all PLT entries are ARM, so no interworking is
-    // required. Otherwise we need to interwork if the symbol has bit 0 set
-    // (Thumb).
-    if (Expr == R_PC && ((S.getVA() & 1) == 1))
-      return true;
-    break;
-  case R_ARM_THM_JUMP19:
-  case R_ARM_THM_JUMP24:
-    // The source is Thumb and all PLT entries are ARM, so interworking is
-    // required. Otherwise we need to interwork if the symbol has bit 0 clear
-    // (ARM).
-    if (Expr == R_PLT_PC || ((S.getVA() & 1) == 0))
-      return true;
-    break;
-  }
-  return false;
-}
-
-void ARMTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
-                                uint64_t Val) const {
-  switch (Type) {
-  case R_ARM_ABS32:
-  case R_ARM_BASE_PREL:
-  case R_ARM_GLOB_DAT:
-  case R_ARM_GOTOFF32:
-  case R_ARM_GOT_BREL:
-  case R_ARM_GOT_PREL:
-  case R_ARM_REL32:
-  case R_ARM_RELATIVE:
-  case R_ARM_SBREL32:
-  case R_ARM_TARGET1:
-  case R_ARM_TARGET2:
-  case R_ARM_TLS_GD32:
-  case R_ARM_TLS_IE32:
-  case R_ARM_TLS_LDM32:
-  case R_ARM_TLS_LDO32:
-  case R_ARM_TLS_LE32:
-  case R_ARM_TLS_TPOFF32:
-  case R_ARM_TLS_DTPOFF32:
-    write32le(Loc, Val);
-    break;
-  case R_ARM_TLS_DTPMOD32:
-    write32le(Loc, 1);
-    break;
-  case R_ARM_PREL31:
-    checkInt<31>(Loc, Val, Type);
-    write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000));
-    break;
-  case R_ARM_CALL:
-    // R_ARM_CALL is used for BL and BLX instructions, depending on the
-    // value of bit 0 of Val, we must select a BL or BLX instruction
-    if (Val & 1) {
-      // If bit 0 of Val is 1 the target is Thumb, we must select a BLX.
-      // The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
-      checkInt<26>(Loc, Val, Type);
-      write32le(Loc, 0xfa000000 |                    // opcode
-                         ((Val & 2) << 23) |         // H
-                         ((Val >> 2) & 0x00ffffff)); // imm24
-      break;
-    }
-    if ((read32le(Loc) & 0xfe000000) == 0xfa000000)
-      // BLX (always unconditional) instruction to an ARM Target, select an
-      // unconditional BL.
-      write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff));
-    // fall through as BL encoding is shared with B
-    LLVM_FALLTHROUGH;
-  case R_ARM_JUMP24:
-  case R_ARM_PC24:
-  case R_ARM_PLT32:
-    checkInt<26>(Loc, Val, Type);
-    write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff));
-    break;
-  case R_ARM_THM_JUMP11:
-    checkInt<12>(Loc, Val, Type);
-    write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff));
-    break;
-  case R_ARM_THM_JUMP19:
-    // Encoding T3: Val = S:J2:J1:imm6:imm11:0
-    checkInt<21>(Loc, Val, Type);
-    write16le(Loc,
-              (read16le(Loc) & 0xfbc0) |   // opcode cond
-                  ((Val >> 10) & 0x0400) | // S
-                  ((Val >> 12) & 0x003f)); // imm6
-    write16le(Loc + 2,
-              0x8000 |                    // opcode
-                  ((Val >> 8) & 0x0800) | // J2
-                  ((Val >> 5) & 0x2000) | // J1
-                  ((Val >> 1) & 0x07ff)); // imm11
-    break;
-  case R_ARM_THM_CALL:
-    // R_ARM_THM_CALL is used for BL and BLX instructions, depending on the
-    // value of bit 0 of Val, we must select a BL or BLX instruction
-    if ((Val & 1) == 0) {
-      // Ensure the BLX destination is 4-byte aligned, as the BLX instruction
-      // may only be 2-byte aligned. This must be done before the overflow check.
-      Val = alignTo(Val, 4);
-    }
-    // Bit 12 is 0 for BLX, 1 for BL
-    write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12);
-    // Fall through as rest of encoding is the same as B.W
-    LLVM_FALLTHROUGH;
-  case R_ARM_THM_JUMP24:
-    // Encoding B  T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
-    // FIXME: Use of I1 and I2 require v6T2ops
-    checkInt<25>(Loc, Val, Type);
-    write16le(Loc,
-              0xf000 |                     // opcode
-                  ((Val >> 14) & 0x0400) | // S
-                  ((Val >> 12) & 0x03ff)); // imm10
-    write16le(Loc + 2,
-              (read16le(Loc + 2) & 0xd000) |                  // opcode
-                  (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1
-                  (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2
-                  ((Val >> 1) & 0x07ff));                     // imm11
-    break;
-  case R_ARM_MOVW_ABS_NC:
-  case R_ARM_MOVW_PREL_NC:
-    write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) |
-                       (Val & 0x0fff));
-    break;
-  case R_ARM_MOVT_ABS:
-  case R_ARM_MOVT_PREL:
-    checkInt<32>(Loc, Val, Type);
-    write32le(Loc, (read32le(Loc) & ~0x000f0fff) |
-                       (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff));
-    break;
-  case R_ARM_THM_MOVT_ABS:
-  case R_ARM_THM_MOVT_PREL:
-    // Encoding T1: A = imm4:i:imm3:imm8
-    checkInt<32>(Loc, Val, Type);
-    write16le(Loc,
-              0xf2c0 |                     // opcode
-                  ((Val >> 17) & 0x0400) | // i
-                  ((Val >> 28) & 0x000f)); // imm4
-    write16le(Loc + 2,
-              (read16le(Loc + 2) & 0x8f00) | // opcode
-                  ((Val >> 12) & 0x7000) |   // imm3
-                  ((Val >> 16) & 0x00ff));   // imm8
-    break;
-  case R_ARM_THM_MOVW_ABS_NC:
-  case R_ARM_THM_MOVW_PREL_NC:
-    // Encoding T3: A = imm4:i:imm3:imm8
-    write16le(Loc,
-              0xf240 |                     // opcode
-                  ((Val >> 1) & 0x0400) |  // i
-                  ((Val >> 12) & 0x000f)); // imm4
-    write16le(Loc + 2,
-              (read16le(Loc + 2) & 0x8f00) | // opcode
-                  ((Val << 4) & 0x7000) |    // imm3
-                  (Val & 0x00ff));           // imm8
-    break;
-  default:
-    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
-  }
-}
-
-int64_t ARMTargetInfo::getImplicitAddend(const uint8_t *Buf,
-                                         uint32_t Type) const {
-  switch (Type) {
-  default:
-    return 0;
-  case R_ARM_ABS32:
-  case R_ARM_BASE_PREL:
-  case R_ARM_GOTOFF32:
-  case R_ARM_GOT_BREL:
-  case R_ARM_GOT_PREL:
-  case R_ARM_REL32:
-  case R_ARM_TARGET1:
-  case R_ARM_TARGET2:
-  case R_ARM_TLS_GD32:
-  case R_ARM_TLS_LDM32:
-  case R_ARM_TLS_LDO32:
-  case R_ARM_TLS_IE32:
-  case R_ARM_TLS_LE32:
-    return SignExtend64<32>(read32le(Buf));
-  case R_ARM_PREL31:
-    return SignExtend64<31>(read32le(Buf));
-  case R_ARM_CALL:
-  case R_ARM_JUMP24:
-  case R_ARM_PC24:
-  case R_ARM_PLT32:
-    return SignExtend64<26>(read32le(Buf) << 2);
-  case R_ARM_THM_JUMP11:
-    return SignExtend64<12>(read16le(Buf) << 1);
-  case R_ARM_THM_JUMP19: {
-    // Encoding T3: A = S:J2:J1:imm10:imm6:0
-    uint16_t Hi = read16le(Buf);
-    uint16_t Lo = read16le(Buf + 2);
-    return SignExtend64<20>(((Hi & 0x0400) << 10) | // S
-                            ((Lo & 0x0800) << 8) |  // J2
-                            ((Lo & 0x2000) << 5) |  // J1
-                            ((Hi & 0x003f) << 12) | // imm6
-                            ((Lo & 0x07ff) << 1));  // imm11:0
-  }
-  case R_ARM_THM_CALL:
-  case R_ARM_THM_JUMP24: {
-    // Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
-    // I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
-    // FIXME: I1 and I2 require v6T2ops
-    uint16_t Hi = read16le(Buf);
-    uint16_t Lo = read16le(Buf + 2);
-    return SignExtend64<24>(((Hi & 0x0400) << 14) |                    // S
-                            (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1
-                            (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2
-                            ((Hi & 0x003ff) << 12) |                   // imm10
-                            ((Lo & 0x007ff) << 1)); // imm11:0
-  }
-  // Per "ELF for the ARM Architecture" 4.6.1.1, the implicit addend for MOVW
-  // and MOVT is in the range -32768 <= A < 32768.
-  case R_ARM_MOVW_ABS_NC:
-  case R_ARM_MOVT_ABS:
-  case R_ARM_MOVW_PREL_NC:
-  case R_ARM_MOVT_PREL: {
-    uint64_t Val = read32le(Buf) & 0x000f0fff;
-    return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff));
-  }
-  case R_ARM_THM_MOVW_ABS_NC:
-  case R_ARM_THM_MOVT_ABS:
-  case R_ARM_THM_MOVW_PREL_NC:
-  case R_ARM_THM_MOVT_PREL: {
-    // Encoding T3: A = imm4:i:imm3:imm8
-    uint16_t Hi = read16le(Buf);
-    uint16_t Lo = read16le(Buf + 2);
-    return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4
-                            ((Hi & 0x0400) << 1) |  // i
-                            ((Lo & 0x7000) >> 4) |  // imm3
-                            (Lo & 0x00ff));         // imm8
-  }
-  }
-}
-
-RelExpr AVRTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                  const uint8_t *Loc) const {
-  switch (Type) {
-  case R_AVR_CALL:
-    return R_ABS;
-  default:
-    error(toString(S.File) + ": unknown relocation type: " + toString(Type));
-    return R_HINT;
-  }
-}
-
-void AVRTargetInfo::relocateOne(uint8_t *Loc, uint32_t Type,
-                                uint64_t Val) const {
-  switch (Type) {
-  case R_AVR_CALL: {
-    uint16_t Hi = Val >> 17;
-    uint16_t Lo = Val >> 1;
-    write16le(Loc, read16le(Loc) | ((Hi >> 1) << 4) | (Hi & 1));
-    write16le(Loc + 2, Lo);
-    break;
-  }
-  default:
-    error(getErrorLocation(Loc) + "unrecognized reloc " + toString(Type));
-  }
-}
-
-template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
-  GotPltHeaderEntriesNum = 2;
-  DefaultMaxPageSize = 65536;
-  GotEntrySize = sizeof(typename ELFT::uint);
-  GotPltEntrySize = sizeof(typename ELFT::uint);
-  PltEntrySize = 16;
-  PltHeaderSize = 32;
-  CopyRel = R_MIPS_COPY;
-  PltRel = R_MIPS_JUMP_SLOT;
-  NeedsThunks = true;
-  if (ELFT::Is64Bits) {
-    RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
-    TlsGotRel = R_MIPS_TLS_TPREL64;
-    TlsModuleIndexRel = R_MIPS_TLS_DTPMOD64;
-    TlsOffsetRel = R_MIPS_TLS_DTPREL64;
-  } else {
-    RelativeRel = R_MIPS_REL32;
-    TlsGotRel = R_MIPS_TLS_TPREL32;
-    TlsModuleIndexRel = R_MIPS_TLS_DTPMOD32;
-    TlsOffsetRel = R_MIPS_TLS_DTPREL32;
-  }
-}
-
-template <class ELFT>
-RelExpr MipsTargetInfo<ELFT>::getRelExpr(uint32_t Type, const SymbolBody &S,
-                                         const uint8_t *Loc) const {
-  // See the comment in calculateMipsRelChain.
-  if (ELFT::Is64Bits || Config->MipsN32Abi)
-    Type &= 0xff;
-  switch (Type) {
-  default:
-    return R_ABS;
-  case R_MIPS_JALR:
-    return R_HINT;
-  case R_MIPS_GPREL16:
-  case R_MIPS_GPREL32:
-    return R_MIPS_GOTREL;
-  case R_MIPS_26:
-    return R_PLT;
-  case R_MIPS_HI16:
-  case R_MIPS_LO16:
-    // R_MIPS_HI16/R_MIPS_LO16 relocations against _gp_disp calculate the
-    // offset between the start of a function and the 'gp' value, which by
-    // default is equal to the start of the .got section. In that case we
-    // consider these relocations to be relative.
-    if (&S == ElfSym::MipsGpDisp)
-      return R_MIPS_GOT_GP_PC;
-    if (&S == ElfSym::MipsLocalGp)
-      return R_MIPS_GOT_GP;
-    LLVM_FALLTHROUGH;
-  case R_MIPS_GOT_OFST:
-    return R_ABS;
-  case R_MIPS_PC32:
-  case R_MIPS_PC16:
-  case R_MIPS_PC19_S2:
-  case R_MIPS_PC21_S2:
-  case R_MIPS_PC26_S2:
-  case R_MIPS_PCHI16:
-  case R_MIPS_PCLO16:
-    return R_PC;
-  case R_MIPS_GOT16:
-    if (S.isLocal())
-      return R_MIPS_GOT_LOCAL_PAGE;
-    LLVM_FALLTHROUGH;
-  case R_MIPS_CALL16:
-  case R_MIPS_GOT_DISP:
-  case R_MIPS_TLS_GOTTPREL:
-    return R_MIPS_GOT_OFF;
-  case R_MIPS_CALL_HI16:
-  case R_MIPS_CALL_LO16:
-  case R_MIPS_GOT_HI16:
-  case R_MIPS_GOT_LO16:
-    return R_MIPS_GOT_OFF32;
-  case R_MIPS_GOT_PAGE:
-    return R_MIPS_GOT_LOCAL_PAGE;
-  case R_MIPS_TLS_GD:
-    return R_MIPS_TLSGD;
-  case R_MIPS_TLS_LDM:
-    return R_MIPS_TLSLD;
-  }
-}
-
-template <class ELFT> bool MipsTargetInfo<ELFT>::isPicRel(uint32_t Type) const {
-  return Type == R_MIPS_32 || Type == R_MIPS_64;
-}
-
-template <class ELFT>
-uint32_t MipsTargetInfo<ELFT>::getDynRel(uint32_t Type) const {
-  return RelativeRel;
-}
-
-template <class ELFT>
-void MipsTargetInfo<ELFT>::writeGotPlt(uint8_t *Buf, const SymbolBody &) const {
-  write32<ELFT::TargetEndianness>(Buf, InX::Plt->getVA());
-}
-
-template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
-static int64_t getPcRelocAddend(const uint8_t *Loc) {
-  uint32_t Instr = read32<E>(Loc);
-  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
-  return SignExtend64<BSIZE + SHIFT>((Instr & Mask) << SHIFT);
-}
-
-template <endianness E, uint8_t BSIZE, uint8_t SHIFT>
-static void applyMipsPcReloc(uint8_t *Loc, uint32_t Type, uint64_t V) {
-  uint32_t Mask = 0xffffffff >> (32 - BSIZE);
-  uint32_t Instr = read32<E>(Loc);
-  if (SHIFT > 0)
-    checkAlignment<(1 << SHIFT)>(Loc, V, Type);
-  checkInt<BSIZE + SHIFT>(Loc, V, Type);
-  write32<E>(Loc, (Instr & ~Mask) | ((V >> SHIFT) & Mask));
-}
-
-template <endianness E> static void writeMipsHi16(uint8_t *Loc, uint64_t V) {
-  uint32_t Instr = read32<E>(Loc);
-  uint16_t Res = ((V + 0x8000) >> 16) & 0xffff;
-  write32<E>(Loc, (Instr & 0xffff0000) | Res);
-}
-
-template <endianness E> static void writeMipsHigher(uint8_t *Loc, uint64_t V) {
-  uint32_t Instr = read32<E>(Loc);
-  uint16_t Res = ((V + 0x80008000) >> 32) & 0xffff;
-  write32<E>(Loc, (Instr & 0xffff0000) | Res);
-}
-
-template <endianness E> static void writeMipsHighest(uint8_t *Loc, uint64_t V) {
-  uint32_t Instr = read32<E>(Loc);
-  uint16_t Res = ((V + 0x800080008000) >> 48) & 0xffff;
-  write32<E>(Loc, (Instr & 0xffff0000) | Res);
-}
-
-template <endianness E> static void writeMipsLo16(uint8_t *Loc, uint64_t V) {
-  uint32_t Instr = read32<E>(Loc);
-  write32<E>(Loc, (Instr & 0xffff0000) | (V & 0xffff));
-}
-
-template <class ELFT> static bool isMipsR6() {
-  const auto &FirstObj = cast<ELFFileBase<ELFT>>(*Config->FirstElf);
-  uint32_t Arch = FirstObj.getObj().getHeader()->e_flags & EF_MIPS_ARCH;
-  return Arch == EF_MIPS_ARCH_32R6 || Arch == EF_MIPS_ARCH_64R6;
-}
-
-template <class ELFT>
-void MipsTargetInfo<ELFT>::writePltHeader(uint8_t *Buf) const {
-  const endianness E = ELFT::TargetEndianness;
-  if (Config->MipsN32Abi) {
-    write32<E>(Buf, 0x3c0e0000);      // lui   $14, %hi(&GOTPLT[0])
-    write32<E>(Buf + 4, 0x8dd90000);  // lw    $25, %lo(&GOTPLT[0])($14)
-    write32<E>(Buf + 8, 0x25ce0000);  // addiu $14, $14, %lo(&GOTPLT[0])
-    write32<E>(Buf + 12, 0x030ec023); // subu  $24, $24, $14
-  } else {
-    write32<E>(Buf, 0x3c1c0000);      // lui   $28, %hi(&GOTPLT[0])
-    write32<E>(Buf + 4, 0x8f990000);  // lw    $25, %lo(&GOTPLT[0])($28)
-    write32<E>(Buf + 8, 0x279c0000);  // addiu $28, $28, %lo(&GOTPLT[0])
-    write32<E>(Buf + 12, 0x031cc023); // subu  $24, $24, $28
-  }
-
-  write32<E>(Buf + 16, 0x03e07825); // move  $15, $31
-  write32<E>(Buf + 20, 0x0018c082); // srl   $24, $24, 2
-  write32<E>(Buf + 24, 0x0320f809); // jalr  $25
-  write32<E>(Buf + 28, 0x2718fffe); // subu  $24, $24, 2
-
-  uint64_t GotPlt = InX::GotPlt->getVA();
-  writeMipsHi16<E>(Buf, GotPlt);
-  writeMipsLo16<E>(Buf + 4, GotPlt);
-  writeMipsLo16<E>(Buf + 8, GotPlt);
-}
-
-template <class ELFT>
-void MipsTargetInfo<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
-                                    uint64_t PltEntryAddr, int32_t Index,
-                                    unsigned RelOff) const {
-  const endianness E = ELFT::TargetEndianness;
-  write32<E>(Buf, 0x3c0f0000);     // lui   $15, %hi(.got.plt entry)
-  write32<E>(Buf + 4, 0x8df90000); // l[wd] $25, %lo(.got.plt entry)($15)
-                                   // jr    $25
-  write32<E>(Buf + 8, isMipsR6<ELFT>() ? 0x03200009 : 0x03200008);
-  write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
-  writeMipsHi16<E>(Buf, GotPltEntryAddr);
-  writeMipsLo16<E>(Buf + 4, GotPltEntryAddr);
-  writeMipsLo16<E>(Buf + 12, GotPltEntryAddr);
-}
-
-template <class ELFT>
-bool MipsTargetInfo<ELFT>::needsThunk(RelExpr Expr, uint32_t Type,
-                                      const InputFile *File,
-                                      const SymbolBody &S) const {
-  // Any MIPS PIC code function is invoked with its address in register $t9.
-  // So if we have a branch instruction from non-PIC code to PIC code,
-  // we cannot make the jump directly and need to create a small stub
-  // to set up the target function address.
-  // See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
-  if (Type != R_MIPS_26)
-    return false;
-  auto *F = dyn_cast_or_null<ELFFileBase<ELFT>>(File);
-  if (!F)
-    return false;
-  // If current file has PIC code, LA25 stub is not required.
-  if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
-    return false;
-  auto *D = dyn_cast<DefinedRegular>(&S);
-  // An LA25 stub is required if the target file has PIC code
-  // or the target symbol is a PIC symbol.
-  return D && D->isMipsPIC<ELFT>();
-}
-
-template <class ELFT>
-int64_t MipsTargetInfo<ELFT>::getImplicitAddend(const uint8_t *Buf,
-                                                uint32_t Type) const {
-  const endianness E = ELFT::TargetEndianness;
-  switch (Type) {
-  default:
-    return 0;
-  case R_MIPS_32:
-  case R_MIPS_GPREL32:
-  case R_MIPS_TLS_DTPREL32:
-  case R_MIPS_TLS_TPREL32:
-    return SignExtend64<32>(read32<E>(Buf));
-  case R_MIPS_26:
-    // FIXME (simon): If the relocation target symbol is not a PLT entry
-    // we should use another expression for calculation:
-    // ((A << 2) | (P & 0xf0000000)) >> 2
-    return SignExtend64<28>((read32<E>(Buf) & 0x3ffffff) << 2);
-  case R_MIPS_GPREL16:
-  case R_MIPS_LO16:
-  case R_MIPS_PCLO16:
-  case R_MIPS_TLS_DTPREL_HI16:
-  case R_MIPS_TLS_DTPREL_LO16:
-  case R_MIPS_TLS_TPREL_HI16:
-  case R_MIPS_TLS_TPREL_LO16:
-    return SignExtend64<16>(read32<E>(Buf));
-  case R_MIPS_PC16:
-    return getPcRelocAddend<E, 16, 2>(Buf);
-  case R_MIPS_PC19_S2:
-    return getPcRelocAddend<E, 19, 2>(Buf);
-  case R_MIPS_PC21_S2:
-    return getPcRelocAddend<E, 21, 2>(Buf);
-  case R_MIPS_PC26_S2:
-    return getPcRelocAddend<E, 26, 2>(Buf);
-  case R_MIPS_PC32:
-    return getPcRelocAddend<E, 32, 0>(Buf);
-  }
-}
-
-static std::pair<uint32_t, uint64_t>
-calculateMipsRelChain(uint8_t *Loc, uint32_t Type, uint64_t Val) {
-  // The MIPS N64 ABI packs multiple relocations into a single relocation
-  // record. In general, all of up to three relocations can have arbitrary
-  // types. In fact, Clang and GCC use only a few combinations. For now,
-  // we support two of them. That is enough to pass at least all LLVM
-  // test suite cases.
-  // <any relocation> / R_MIPS_SUB / R_MIPS_HI16 | R_MIPS_LO16
-  // <any relocation> / R_MIPS_64 / R_MIPS_NONE
-  // The first relocation is a 'real' relocation which is calculated
-  // using the corresponding symbol's value. The second and the third
-  // relocations are used to modify the result of the first one: extend it
-  // to 64 bits, extract the high or low part, etc. For details, see part
-  // 2.9 Relocation in https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
-  uint32_t Type2 = (Type >> 8) & 0xff;
-  uint32_t Type3 = (Type >> 16) & 0xff;
-  if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
-    return std::make_pair(Type, Val);
-  if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
-    return std::make_pair(Type2, Val);
-  if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
-    return std::make_pair(Type3, -Val);
-  error(getErrorLocation(Loc) + "unsupported relocations combination " +
-        Twine(Type));
-  return std::make_pair(Type & 0xff, Val);
-}
-
-template <class ELFT>
-void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Loc, uint32_t Type,
-                                       uint64_t Val) const {
-  const endianness E = ELFT::TargetEndianness;
-  // Thread pointer and DTP offsets from the start of the TLS data area.
-  // https://www.linux-mips.org/wiki/NPTL
-  if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16 ||
-      Type == R_MIPS_TLS_DTPREL32 || Type == R_MIPS_TLS_DTPREL64)
-    Val -= 0x8000;
-  else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16 ||
-           Type == R_MIPS_TLS_TPREL32 || Type == R_MIPS_TLS_TPREL64)
-    Val -= 0x7000;
-  if (ELFT::Is64Bits || Config->MipsN32Abi)
-    std::tie(Type, Val) = calculateMipsRelChain(Loc, Type, Val);
-  switch (Type) {
-  case R_MIPS_32:
-  case R_MIPS_GPREL32:
-  case R_MIPS_TLS_DTPREL32:
-  case R_MIPS_TLS_TPREL32:
-    write32<E>(Loc, Val);
-    break;
-  case R_MIPS_64:
-  case R_MIPS_TLS_DTPREL64:
-  case R_MIPS_TLS_TPREL64:
-    write64<E>(Loc, Val);
-    break;
-  case R_MIPS_26:
-    write32<E>(Loc, (read32<E>(Loc) & ~0x3ffffff) | ((Val >> 2) & 0x3ffffff));
-    break;
-  case R_MIPS_GOT16:
-    // In "relocatable" linking mode the R_MIPS_GOT16 relocation's value
-    // is an updated addend (not a GOT index). In that case, write the
-    // high 16 bits to store a correct addend value.
-    if (Config->Relocatable)
-      writeMipsHi16<E>(Loc, Val);
-    else {
-      checkInt<16>(Loc, Val, Type);
-      writeMipsLo16<E>(Loc, Val);
-    }
-    break;
-  case R_MIPS_GOT_DISP:
-  case R_MIPS_GOT_PAGE:
-  case R_MIPS_GPREL16:
-  case R_MIPS_TLS_GD:
-  case R_MIPS_TLS_LDM:
-    checkInt<16>(Loc, Val, Type);
-    LLVM_FALLTHROUGH;
-  case R_MIPS_CALL16:
-  case R_MIPS_CALL_LO16:
-  case R_MIPS_GOT_LO16:
-  case R_MIPS_GOT_OFST:
-  case R_MIPS_LO16:
-  case R_MIPS_PCLO16:
-  case R_MIPS_TLS_DTPREL_LO16:
-  case R_MIPS_TLS_GOTTPREL:
-  case R_MIPS_TLS_TPREL_LO16:
-    writeMipsLo16<E>(Loc, Val);
-    break;
-  case R_MIPS_CALL_HI16:
-  case R_MIPS_GOT_HI16:
-  case R_MIPS_HI16:
-  case R_MIPS_PCHI16:
-  case R_MIPS_TLS_DTPREL_HI16:
-  case R_MIPS_TLS_TPREL_HI16:
-    writeMipsHi16<E>(Loc, Val);
-    break;
-  case R_MIPS_HIGHER:
-    writeMipsHigher<E>(Loc, Val);
-    break;
-  case R_MIPS_HIGHEST:
-    writeMipsHighest<E>(Loc, Val);
-    break;
-  case R_MIPS_JALR:
-    // Ignore this optimization relocation for now
-    break;
-  case R_MIPS_PC16:
-    applyMipsPcReloc<E, 16, 2>(Loc, Type, Val);
-    break;
-  case R_MIPS_PC19_S2:
-    applyMipsPcReloc<E, 19, 2>(Loc, Type, Val);
-    break;
-  case R_MIPS_PC21_S2:
-    applyMipsPcReloc<E, 21, 2>(Loc, Type, Val);
-    break;
-  case R_MIPS_PC26_S2:
-    applyMipsPcReloc<E, 26, 2>(Loc, Type, Val);
-    break;
-  case R_MIPS_PC32:
-    applyMipsPcReloc<E, 32, 0>(Loc, Type, Val);
-    break;
-  default:
-    error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
-  }
-}
-
-template <class ELFT>
-bool MipsTargetInfo<ELFT>::usesOnlyLowPageBits(uint32_t Type) const {
-  return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST;
-}
-}
-}
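
For readers unfamiliar with the N64 packing scheme handled by
calculateMipsRelChain above, the following self-contained sketch shows how a
packed r_type splits into its component relocation types. The constants are
the standard ELF r_type values; unpackN64RelType is a hypothetical helper
written only for illustration and is not part of lld.

  #include <cstdint>
  #include <tuple>

  // Standard MIPS ELF relocation type values.
  constexpr uint32_t R_MIPS_HI16 = 5;
  constexpr uint32_t R_MIPS_GPREL16 = 7;
  constexpr uint32_t R_MIPS_SUB = 24;

  // An N64 relocation record carries up to three types, one per byte.
  // The low byte is the "real" relocation; the next two modify its result.
  std::tuple<uint32_t, uint32_t, uint32_t> unpackN64RelType(uint32_t Type) {
    return {Type & 0xff, (Type >> 8) & 0xff, (Type >> 16) & 0xff};
  }

  // Example: the chain R_MIPS_GPREL16 / R_MIPS_SUB / R_MIPS_HI16 is encoded
  // as R_MIPS_GPREL16 | (R_MIPS_SUB << 8) | (R_MIPS_HI16 << 16) == 0x051807.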

Modified: lld/trunk/ELF/Target.h
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/Target.h?rev=305565&r1=305564&r2=305565&view=diff
==============================================================================
--- lld/trunk/ELF/Target.h (original)
+++ lld/trunk/ELF/Target.h Fri Jun 16 12:32:43 2017
@@ -10,13 +10,13 @@
 #ifndef LLD_ELF_TARGET_H
 #define LLD_ELF_TARGET_H
 
+#include "Error.h"
 #include "InputSection.h"
-#include "llvm/ADT/StringRef.h"
 #include "llvm/Object/ELF.h"
 
-#include <memory>
-
 namespace lld {
+std::string toString(uint32_t RelType);
+
 namespace elf {
 class InputFile;
 class SymbolBody;
@@ -102,14 +102,53 @@ public:
   virtual void relaxTlsLdToLe(uint8_t *Loc, uint32_t Type, uint64_t Val) const;
 };
 
+TargetInfo *createAArch64TargetInfo();
+TargetInfo *createAMDGPUTargetInfo();
+TargetInfo *createARMTargetInfo();
+TargetInfo *createAVRTargetInfo();
+TargetInfo *createPPC64TargetInfo();
+TargetInfo *createPPCTargetInfo();
+TargetInfo *createX32TargetInfo();
+TargetInfo *createX86TargetInfo();
+TargetInfo *createX86_64TargetInfo();
+template <class ELFT> TargetInfo *createMipsTargetInfo();
+
+std::string getErrorLocation(const uint8_t *Loc);
+
 uint64_t getPPC64TocBase();
 uint64_t getAArch64Page(uint64_t Expr);
 
 extern TargetInfo *Target;
 TargetInfo *createTarget();
+
+template <unsigned N>
+static void checkInt(uint8_t *Loc, int64_t V, uint32_t Type) {
+  if (!llvm::isInt<N>(V))
+    error(getErrorLocation(Loc) + "relocation " + lld::toString(Type) +
+          " out of range");
 }
 
-std::string toString(uint32_t RelType);
+template <unsigned N>
+static void checkUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
+  if (!llvm::isUInt<N>(V))
+    error(getErrorLocation(Loc) + "relocation " + lld::toString(Type) +
+          " out of range");
+}
+
+template <unsigned N>
+static void checkIntUInt(uint8_t *Loc, uint64_t V, uint32_t Type) {
+  if (!llvm::isInt<N>(V) && !llvm::isUInt<N>(V))
+    error(getErrorLocation(Loc) + "relocation " + lld::toString(Type) +
+          " out of range");
+}
+
+template <unsigned N>
+static void checkAlignment(uint8_t *Loc, uint64_t V, uint32_t Type) {
+  if ((V & (N - 1)) != 0)
+    error(getErrorLocation(Loc) + "improper alignment for relocation " +
+          lld::toString(Type));
+}
+} // namespace elf
 }
 
 #endif
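
The range-check templates added to Target.h above let the new per-target
files under ELF/Arch share the same out-of-range diagnostics. A minimal usage
sketch follows; it assumes the lld::elf context from this patch plus
llvm::support::endian::write16le from llvm/Support/Endian.h, and applyAbs16
is a hypothetical example handler, not an lld function.

  // Sketch of a relocateOne-style handler for a 16-bit absolute field.
  static void applyAbs16(uint8_t *Loc, uint32_t Type, uint64_t Val) {
    // Reports "relocation ... out of range" unless Val fits in 16 signed bits.
    checkInt<16>(Loc, Val, Type);
    // Then patch the 16-bit field in place (little-endian shown for brevity).
    llvm::support::endian::write16le(Loc, static_cast<uint16_t>(Val));
  }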



