[lld] r220730 - TMP: fix readN & writeN to not encourage UB
Tim Northover
tnorthover@apple.com
Mon Oct 27 15:48:36 PDT 2014
Author: tnorthover
Date: Mon Oct 27 17:48:35 2014
New Revision: 220730
URL: http://llvm.org/viewvc/llvm-project?rev=220730&view=rev
Log:
TMP: fix readN & writeN to not encourage UB
Modified:
lld/trunk/lib/ReaderWriter/MachO/ArchHandler.cpp
lld/trunk/lib/ReaderWriter/MachO/ArchHandler.h
lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm.cpp
lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp
lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86.cpp
lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp
lld/trunk/lib/ReaderWriter/MachO/CompactUnwindPass.cpp
lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryReader.cpp
lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryUtils.h
lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileToAtoms.cpp
Modified: lld/trunk/lib/ReaderWriter/MachO/ArchHandler.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/ArchHandler.cpp?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/ArchHandler.cpp (original)
+++ lld/trunk/lib/ReaderWriter/MachO/ArchHandler.cpp Mon Oct 27 17:48:35 2014
@@ -124,31 +124,31 @@ void ArchHandler::appendReloc(normalized
}
-int16_t ArchHandler::readS16(bool swap, const uint8_t *addr) {
- return read16(swap, *reinterpret_cast<const uint16_t*>(addr));
+int16_t ArchHandler::readS16(const uint8_t *addr, bool isBig) {
+ return read16(addr, isBig);
}
-int32_t ArchHandler::readS32(bool swap, const uint8_t *addr) {
- return read32(swap, *reinterpret_cast<const uint32_t*>(addr));
+int32_t ArchHandler::readS32(const uint8_t *addr, bool isBig) {
+ return read32(addr, isBig);
}
-uint32_t ArchHandler::readU32(bool swap, const uint8_t *addr) {
- return read32(swap, *reinterpret_cast<const uint32_t*>(addr));
+uint32_t ArchHandler::readU32(const uint8_t *addr, bool isBig) {
+ return read32(addr, isBig);
}
-int64_t ArchHandler::readS64(bool swap, const uint8_t *addr) {
- return read64(swap, *reinterpret_cast<const uint64_t*>(addr));
+ int64_t ArchHandler::readS64(const uint8_t *addr, bool isBig) {
+ return read64(addr, isBig);
}
-bool ArchHandler::isDwarfCIE(bool swap, const DefinedAtom *atom) {
+bool ArchHandler::isDwarfCIE(bool isBig, const DefinedAtom *atom) {
assert(atom->contentType() == DefinedAtom::typeCFI);
- uint32_t size = read32(swap, *(uint32_t *)atom->rawContent().data());
+ uint32_t size = read32(atom->rawContent().data(), isBig);
uint32_t idOffset = sizeof(uint32_t);
if (size == 0xffffffffU)
idOffset += sizeof(uint64_t);
- return read32(swap, *(uint32_t *)(atom->rawContent().data() + idOffset)) == 0;
+ return read32(atom->rawContent().data() + idOffset, isBig) == 0;
}
const Atom *ArchHandler::fdeTargetFunction(const DefinedAtom *fde) {
Modified: lld/trunk/lib/ReaderWriter/MachO/ArchHandler.h
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/ArchHandler.h?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/ArchHandler.h (original)
+++ lld/trunk/lib/ReaderWriter/MachO/ArchHandler.h Mon Oct 27 17:48:35 2014
@@ -118,7 +118,7 @@ public:
getReferenceInfo(const normalized::Relocation &reloc,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
- uint64_t fixupAddress, bool swap,
+ uint64_t fixupAddress, bool isBigEndian,
FindAtomBySectionAndAddress atomFromAddress,
FindAtomBySymbolIndex atomFromSymbolIndex,
Reference::KindValue *kind,
@@ -134,7 +134,7 @@ public:
const normalized::Relocation &reloc2,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
- uint64_t fixupAddress, bool swap, bool scatterable,
+ uint64_t fixupAddress, bool isBig, bool scatterable,
FindAtomBySectionAndAddress atomFromAddress,
FindAtomBySymbolIndex atomFromSymbolIndex,
Reference::KindValue *kind,
@@ -211,7 +211,7 @@ public:
}
/// Does a given unwind-cfi atom represent a CIE (as opposed to an FDE).
- static bool isDwarfCIE(bool swap, const DefinedAtom *atom);
+ static bool isDwarfCIE(bool isBig, const DefinedAtom *atom);
struct ReferenceInfo {
Reference::KindArch arch;
@@ -288,10 +288,10 @@ protected:
RelocPattern pattern);
- static int16_t readS16(bool swap, const uint8_t *addr);
- static int32_t readS32(bool swap, const uint8_t *addr);
- static uint32_t readU32(bool swap, const uint8_t *addr);
- static int64_t readS64(bool swap, const uint8_t *addr);
+ static int16_t readS16(const uint8_t *addr, bool isBig);
+ static int32_t readS32(const uint8_t *addr, bool isBig);
+ static uint32_t readU32(const uint8_t *addr, bool isBig);
+ static int64_t readS64(const uint8_t *addr, bool isBig);
};
} // namespace mach_o
Modified: lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm.cpp?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm.cpp (original)
+++ lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm.cpp Mon Oct 27 17:48:35 2014
@@ -13,6 +13,8 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm::MachO;
@@ -21,6 +23,10 @@ using namespace lld::mach_o::normalized;
namespace lld {
namespace mach_o {
+using llvm::support::ulittle32_t;
+using llvm::support::little32_t;
+
+
class ArchHandler_arm : public ArchHandler {
public:
ArchHandler_arm();
@@ -191,16 +197,13 @@ private:
uint64_t targetAddress,
uint64_t inAtomAddress, bool &thumbMode,
bool targetIsThumb);
-
- const bool _swap;
};
//===----------------------------------------------------------------------===//
// ArchHandler_arm
//===----------------------------------------------------------------------===//
-ArchHandler_arm::ArchHandler_arm() :
- _swap(!MachOLinkingContext::isHostEndian(MachOLinkingContext::arch_armv7)) {}
+ArchHandler_arm::ArchHandler_arm() { }
ArchHandler_arm::~ArchHandler_arm() { }
@@ -511,14 +514,14 @@ uint32_t ArchHandler_arm::clearThumbBit(
std::error_code ArchHandler_arm::getReferenceInfo(
const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom,
- uint64_t fixupAddress, bool swap,
+ uint64_t fixupAddress, bool isBig,
FindAtomBySectionAndAddress atomFromAddress,
FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
const lld::Atom **target, Reference::Addend *addend) {
typedef std::error_code E;
const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
uint64_t targetAddress;
- uint32_t instruction = readU32(swap, fixupContent);
+ uint32_t instruction = *(ulittle32_t *)fixupContent;
int32_t displacement;
switch (relocPattern(reloc)) {
case ARM_THUMB_RELOC_BR22 | rPcRel | rExtern | rLength4:
@@ -626,7 +629,7 @@ ArchHandler_arm::getPairReferenceInfo(co
const normalized::Relocation &reloc2,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
- uint64_t fixupAddress, bool swap,
+ uint64_t fixupAddress, bool isBig,
bool scatterable,
FindAtomBySectionAndAddress atomFromAddr,
FindAtomBySymbolIndex atomFromSymbolIndex,
@@ -778,7 +781,7 @@ ArchHandler_arm::getPairReferenceInfo(co
}
const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
std::error_code ec;
- uint32_t instruction = readU32(swap, fixupContent);
+ uint32_t instruction = *(ulittle32_t *)fixupContent;
uint32_t value;
uint32_t fromAddress;
uint32_t toAddress;
@@ -898,7 +901,7 @@ ArchHandler_arm::getPairReferenceInfo(co
return std::error_code();
}
-void ArchHandler_arm::applyFixupFinal(const Reference &ref, uint8_t *location,
+void ArchHandler_arm::applyFixupFinal(const Reference &ref, uint8_t *loc,
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress,
@@ -906,7 +909,7 @@ void ArchHandler_arm::applyFixupFinal(co
if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
return;
assert(ref.kindArch() == Reference::KindArch::ARM);
- int32_t *loc32 = reinterpret_cast<int32_t *>(location);
+ ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
int32_t displacement;
uint16_t value16;
uint32_t value32;
@@ -923,76 +926,76 @@ void ArchHandler_arm::applyFixupFinal(co
case thumb_bl22:
assert(thumbMode);
displacement = (targetAddress - (fixupAddress + 4)) + ref.addend();
- value32 = setDisplacementInThumbBranch(*loc32, fixupAddress, displacement,
- targetIsThumb);
- write32(*loc32, _swap, value32);
+ value32 = setDisplacementInThumbBranch(*loc32, fixupAddress,
+ displacement, targetIsThumb);
+ *loc32 = value32;
break;
case thumb_movw:
assert(thumbMode);
value16 = (targetAddress + ref.addend()) & 0xFFFF;
if (targetIsThumb)
value16 |= 1;
- write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16));
+ *loc32 = setWordFromThumbMov(*loc32, value16);
break;
case thumb_movt:
assert(thumbMode);
value16 = (targetAddress + ref.addend()) >> 16;
- write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16));
+ *loc32 = setWordFromThumbMov(*loc32, value16);
break;
case thumb_movw_funcRel:
assert(thumbMode);
value16 = (targetAddress - inAtomAddress + ref.addend()) & 0xFFFF;
if (targetIsThumb)
value16 |= 1;
- write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16));
+ *loc32 = setWordFromThumbMov(*loc32, value16);
break;
case thumb_movt_funcRel:
assert(thumbMode);
value16 = (targetAddress - inAtomAddress + ref.addend()) >> 16;
- write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16));
+ *loc32 = setWordFromThumbMov(*loc32, value16);
break;
case arm_b24:
case arm_bl24:
assert(!thumbMode);
displacement = (targetAddress - (fixupAddress + 8)) + ref.addend();
value32 = setDisplacementInArmBranch(*loc32, displacement, targetIsThumb);
- write32(*loc32, _swap, value32);
+ *loc32 = value32;
break;
case arm_movw:
assert(!thumbMode);
value16 = (targetAddress + ref.addend()) & 0xFFFF;
if (targetIsThumb)
value16 |= 1;
- write32(*loc32, _swap, setWordFromArmMov(*loc32, value16));
+ *loc32 = setWordFromArmMov(*loc32, value16);
break;
case arm_movt:
assert(!thumbMode);
value16 = (targetAddress + ref.addend()) >> 16;
- write32(*loc32, _swap, setWordFromArmMov(*loc32, value16));
+ *loc32 = setWordFromArmMov(*loc32, value16);
break;
case arm_movw_funcRel:
assert(!thumbMode);
value16 = (targetAddress - inAtomAddress + ref.addend()) & 0xFFFF;
if (targetIsThumb)
value16 |= 1;
- write32(*loc32, _swap, setWordFromArmMov(*loc32, value16));
+ *loc32 = setWordFromArmMov(*loc32, value16);
break;
case arm_movt_funcRel:
assert(!thumbMode);
value16 = (targetAddress - inAtomAddress + ref.addend()) >> 16;
- write32(*loc32, _swap, setWordFromArmMov(*loc32, value16));
+ *loc32 = setWordFromArmMov(*loc32, value16);
break;
case pointer32:
if (targetIsThumb)
- write32(*loc32, _swap, targetAddress + ref.addend() + 1);
+ *loc32 = targetAddress + ref.addend() + 1;
else
- write32(*loc32, _swap, targetAddress + ref.addend());
+ *loc32 = targetAddress + ref.addend();
break;
case delta32:
if (targetIsThumb)
- write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend() + 1);
+ *loc32 = targetAddress - fixupAddress + ref.addend() + 1;
else
- write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend());
+ *loc32 = targetAddress - fixupAddress + ref.addend();
break;
case lazyPointer:
case lazyImmediateLocation:
@@ -1058,15 +1061,14 @@ bool ArchHandler_arm::useExternalRelocat
return false;
}
-void ArchHandler_arm::applyFixupRelocatable(const Reference &ref,
- uint8_t *location,
- uint64_t fixupAddress,
- uint64_t targetAddress,
- uint64_t inAtomAddress,
- bool &thumbMode,
- bool targetIsThumb) {
+void ArchHandler_arm::applyFixupRelocatable(const Reference &ref, uint8_t *loc,
+ uint64_t fixupAddress,
+ uint64_t targetAddress,
+ uint64_t inAtomAddress,
+ bool &thumbMode,
+ bool targetIsThumb) {
bool useExternalReloc = useExternalRelocationTo(*ref.target());
- int32_t *loc32 = reinterpret_cast<int32_t *>(location);
+ ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
int32_t displacement;
uint16_t value16;
uint32_t value32;
@@ -1086,9 +1088,9 @@ void ArchHandler_arm::applyFixupRelocata
displacement = (ref.addend() - (fixupAddress + 4));
else
displacement = (targetAddress - (fixupAddress + 4)) + ref.addend();
- value32 = setDisplacementInThumbBranch(*loc32, fixupAddress, displacement,
- targetIsThumb);
- write32(*loc32, _swap, value32);
+ value32 = setDisplacementInThumbBranch(*loc32, fixupAddress,
+ displacement, targetIsThumb);
+ *loc32 = value32;
break;
case thumb_movw:
assert(thumbMode);
@@ -1096,7 +1098,7 @@ void ArchHandler_arm::applyFixupRelocata
value16 = ref.addend() & 0xFFFF;
else
value16 = (targetAddress + ref.addend()) & 0xFFFF;
- write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16));
+ *loc32 = setWordFromThumbMov(*loc32, value16);
break;
case thumb_movt:
assert(thumbMode);
@@ -1104,17 +1106,17 @@ void ArchHandler_arm::applyFixupRelocata
value16 = ref.addend() >> 16;
else
value16 = (targetAddress + ref.addend()) >> 16;
- write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16));
+ *loc32 = setWordFromThumbMov(*loc32, value16);
break;
case thumb_movw_funcRel:
assert(thumbMode);
value16 = (targetAddress - inAtomAddress + ref.addend()) & 0xFFFF;
- write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16));
+ *loc32 = setWordFromThumbMov(*loc32, value16);
break;
case thumb_movt_funcRel:
assert(thumbMode);
value16 = (targetAddress - inAtomAddress + ref.addend()) >> 16;
- write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16));
+ *loc32 = setWordFromThumbMov(*loc32, value16);
break;
case arm_b24:
case arm_bl24:
@@ -1123,8 +1125,9 @@ void ArchHandler_arm::applyFixupRelocata
displacement = (ref.addend() - (fixupAddress + 8));
else
displacement = (targetAddress - (fixupAddress + 8)) + ref.addend();
- value32 = setDisplacementInArmBranch(*loc32, displacement, targetIsThumb);
- write32(*loc32, _swap, value32);
+ value32 = setDisplacementInArmBranch(*loc32, displacement,
+ targetIsThumb);
+ *loc32 = value32;
break;
case arm_movw:
assert(!thumbMode);
@@ -1132,7 +1135,7 @@ void ArchHandler_arm::applyFixupRelocata
value16 = ref.addend() & 0xFFFF;
else
value16 = (targetAddress + ref.addend()) & 0xFFFF;
- write32(*loc32, _swap, setWordFromArmMov(*loc32, value16));
+ *loc32 = setWordFromArmMov(*loc32, value16);
break;
case arm_movt:
assert(!thumbMode);
@@ -1140,23 +1143,23 @@ void ArchHandler_arm::applyFixupRelocata
value16 = ref.addend() >> 16;
else
value16 = (targetAddress + ref.addend()) >> 16;
- write32(*loc32, _swap, setWordFromArmMov(*loc32, value16));
+ *loc32 = setWordFromArmMov(*loc32, value16);
break;
case arm_movw_funcRel:
assert(!thumbMode);
value16 = (targetAddress - inAtomAddress + ref.addend()) & 0xFFFF;
- write32(*loc32, _swap, setWordFromArmMov(*loc32, value16));
+ *loc32 = setWordFromArmMov(*loc32, value16);
break;
case arm_movt_funcRel:
assert(!thumbMode);
value16 = (targetAddress - inAtomAddress + ref.addend()) >> 16;
- write32(*loc32, _swap, setWordFromArmMov(*loc32, value16));
+ *loc32 = setWordFromArmMov(*loc32, value16);
break;
case pointer32:
- write32(*loc32, _swap, targetAddress + ref.addend());
+ *loc32 = targetAddress + ref.addend();
break;
case delta32:
- write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend());
+ *loc32 = targetAddress - fixupAddress + ref.addend();
break;
case lazyPointer:
case lazyImmediateLocation:
Modified: lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp (original)
+++ lld/trunk/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp Mon Oct 27 17:48:35 2014
@@ -13,6 +13,8 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
@@ -22,6 +24,12 @@ using namespace lld::mach_o::normalized;
namespace lld {
namespace mach_o {
+using llvm::support::ulittle32_t;
+using llvm::support::ulittle64_t;
+
+using llvm::support::little32_t;
+using llvm::support::little64_t;
+
class ArchHandler_arm64 : public ArchHandler {
public:
ArchHandler_arm64();
@@ -109,7 +117,7 @@ public:
std::error_code getReferenceInfo(const normalized::Relocation &reloc,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
- uint64_t fixupAddress, bool swap,
+ uint64_t fixupAddress, bool isBig,
FindAtomBySectionAndAddress atomFromAddress,
FindAtomBySymbolIndex atomFromSymbolIndex,
Reference::KindValue *kind,
@@ -120,7 +128,7 @@ public:
const normalized::Relocation &reloc2,
const DefinedAtom *inAtom,
uint32_t offsetInAtom,
- uint64_t fixupAddress, bool swap, bool scatterable,
+ uint64_t fixupAddress, bool isBig, bool scatterable,
FindAtomBySectionAndAddress atomFromAddress,
FindAtomBySymbolIndex atomFromSymbolIndex,
Reference::KindValue *kind,
@@ -190,13 +198,9 @@ private:
static uint32_t setDisplacementInADRP(uint32_t instr, int64_t disp);
static Arm64_Kinds offset12KindFromInstruction(uint32_t instr);
static uint32_t setImm12(uint32_t instr, uint32_t offset);
-
- const bool _swap;
};
-ArchHandler_arm64::ArchHandler_arm64()
- : _swap(!MachOLinkingContext::isHostEndian(MachOLinkingContext::arch_arm64)) {
-}
+ArchHandler_arm64::ArchHandler_arm64() {}
ArchHandler_arm64::~ArchHandler_arm64() {}
@@ -334,7 +338,7 @@ uint32_t ArchHandler_arm64::setImm12(uin
std::error_code ArchHandler_arm64::getReferenceInfo(
const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom,
- uint64_t fixupAddress, bool swap,
+ uint64_t fixupAddress, bool isBig,
FindAtomBySectionAndAddress atomFromAddress,
FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
const lld::Atom **target, Reference::Addend *addend) {
@@ -356,7 +360,7 @@ std::error_code ArchHandler_arm64::getRe
return std::error_code();
case ARM64_RELOC_PAGEOFF12 | rExtern | rLength4:
// ex: ldr x0, [x1, _foo@PAGEOFF]
- *kind = offset12KindFromInstruction(readS32(swap, fixupContent));
+ *kind = offset12KindFromInstruction(*(little32_t *)fixupContent);
if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
*addend = 0;
@@ -394,7 +398,7 @@ std::error_code ArchHandler_arm64::getRe
*kind = pointer64;
if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = readS64(swap, fixupContent);
+ *addend = *(little64_t *)fixupContent;
return std::error_code();
case ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8:
// ex: .quad _foo@GOT
@@ -454,7 +458,7 @@ std::error_code ArchHandler_arm64::getPa
*kind = delta64;
if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
return ec;
- *addend = readS64(swap, fixupContent) + offsetInAtom;
+ *addend = *(little64_t *)fixupContent + offsetInAtom;
return std::error_code();
case ((ARM64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
ARM64_RELOC_UNSIGNED | rExtern | rLength4):
@@ -462,7 +466,7 @@ std::error_code ArchHandler_arm64::getPa
*kind = delta32;
if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
return ec;
- *addend = readS32(swap, fixupContent) + offsetInAtom;
+ *addend = *(little32_t *)fixupContent + offsetInAtom;
return std::error_code();
default:
return make_dynamic_error_code(Twine("unsupported arm64 relocation pair"));
@@ -494,86 +498,79 @@ void ArchHandler_arm64::generateAtomCont
}
}
-void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *location,
+void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *loc,
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress) {
if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
return;
assert(ref.kindArch() == Reference::KindArch::AArch64);
- int32_t *loc32 = reinterpret_cast<int32_t *>(location);
- uint64_t *loc64 = reinterpret_cast<uint64_t *>(location);
+ ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
+ ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
int32_t displacement;
uint32_t instruction;
uint32_t value32;
switch (static_cast<Arm64_Kinds>(ref.kindValue())) {
case branch26:
displacement = (targetAddress - fixupAddress) + ref.addend();
- value32 = setDisplacementInBranch26(*loc32, displacement);
- write32(*loc32, _swap, value32);
+ *loc32 = setDisplacementInBranch26(*loc32, displacement);
return;
case page21:
case gotPage21:
case tlvPage21:
displacement =
((targetAddress + ref.addend()) & (-4096)) - (fixupAddress & (-4096));
- value32 = setDisplacementInADRP(*loc32, displacement);
- write32(*loc32, _swap, value32);
+ *loc32 = setDisplacementInADRP(*loc32, displacement);
return;
case offset12:
case gotOffset12:
case tlvOffset12:
displacement = (targetAddress + ref.addend()) & 0x00000FFF;
- value32 = setImm12(*loc32, displacement);
- write32(*loc32, _swap, value32);
+ *loc32 = setImm12(*loc32, displacement);
return;
case offset12scale2:
displacement = (targetAddress + ref.addend()) & 0x00000FFF;
assert(((displacement & 0x1) == 0) &&
"scaled imm12 not accessing 2-byte aligneds");
- value32 = setImm12(*loc32, displacement >> 1);
- write32(*loc32, _swap, value32);
+ *loc32 = setImm12(*loc32, displacement >> 1);
return;
case offset12scale4:
displacement = (targetAddress + ref.addend()) & 0x00000FFF;
assert(((displacement & 0x3) == 0) &&
"scaled imm12 not accessing 4-byte aligned");
- value32 = setImm12(*loc32, displacement >> 2);
- write32(*loc32, _swap, value32);
+ *loc32 = setImm12(*loc32, displacement >> 2);
return;
case offset12scale8:
displacement = (targetAddress + ref.addend()) & 0x00000FFF;
assert(((displacement & 0x7) == 0) &&
"scaled imm12 not accessing 8-byte aligned");
- value32 = setImm12(*loc32, displacement >> 3);
- write32(*loc32, _swap, value32);
+ *loc32 = setImm12(*loc32, displacement >> 3);
return;
case offset12scale16:
displacement = (targetAddress + ref.addend()) & 0x00000FFF;
assert(((displacement & 0xF) == 0) &&
"scaled imm12 not accessing 16-byte aligned");
- value32 = setImm12(*loc32, displacement >> 4);
- write32(*loc32, _swap, value32);
+ *loc32 = setImm12(*loc32, displacement >> 4);
return;
case addOffset12:
- instruction = read32(_swap, *loc32);
+ instruction = *loc32;
assert(((instruction & 0xFFC00000) == 0xF9400000) &&
"GOT reloc is not an LDR instruction");
displacement = (targetAddress + ref.addend()) & 0x00000FFF;
value32 = 0x91000000 | (instruction & 0x000003FF);
instruction = setImm12(value32, displacement);
- write32(*loc32, _swap, instruction);
+ *loc32 = instruction;
return;
case pointer64:
case pointer64ToGOT:
- write64(*loc64, _swap, targetAddress + ref.addend());
+ *loc64 = targetAddress + ref.addend();
return;
case delta64:
- write64(*loc64, _swap, (targetAddress - fixupAddress) + ref.addend());
+ *loc64 = (targetAddress - fixupAddress) + ref.addend();
return;
case delta32:
case delta32ToGOT:
- write32(*loc32, _swap, (targetAddress - fixupAddress) + ref.addend());
+ *loc32 = (targetAddress - fixupAddress) + ref.addend();
return;
case lazyPointer:
case lazyImmediateLocation:
@@ -587,26 +584,23 @@ void ArchHandler_arm64::applyFixupFinal(
}
void ArchHandler_arm64::applyFixupRelocatable(const Reference &ref,
- uint8_t *location,
+ uint8_t *loc,
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress) {
if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
return;
assert(ref.kindArch() == Reference::KindArch::AArch64);
- int32_t *loc32 = reinterpret_cast<int32_t *>(location);
- uint64_t *loc64 = reinterpret_cast<uint64_t *>(location);
- uint32_t value32;
+ ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
+ ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
switch (static_cast<Arm64_Kinds>(ref.kindValue())) {
case branch26:
- value32 = setDisplacementInBranch26(*loc32, 0);
- write32(*loc32, _swap, value32);
+ *loc32 = setDisplacementInBranch26(*loc32, 0);
return;
case page21:
case gotPage21:
case tlvPage21:
- value32 = setDisplacementInADRP(*loc32, 0);
- write32(*loc32, _swap, value32);
+ *loc32 = setDisplacementInADRP(*loc32, 0);
return;
case offset12:
case offset12scale2:
@@ -615,23 +609,22 @@ void ArchHandler_arm64::applyFixupReloca
case offset12scale16:
case gotOffset12:
case tlvOffset12:
- value32 = setImm12(*loc32, 0);
- write32(*loc32, _swap, value32);
+ *loc32 = setImm12(*loc32, 0);
return;
case pointer64:
- write64(*loc64, _swap, ref.addend());
+ *loc64 = ref.addend();
return;
case delta64:
- write64(*loc64, _swap, ref.addend() + inAtomAddress - fixupAddress);
+ *loc64 = ref.addend() + inAtomAddress - fixupAddress;
return;
case delta32:
- write32(*loc32, _swap, ref.addend() + inAtomAddress - fixupAddress);
+ *loc32 = ref.addend() + inAtomAddress - fixupAddress;
return;
case pointer64ToGOT:
- write64(*loc64, _swap, 0);
+ *loc64 = 0;
return;
case delta32ToGOT:
- write32(*loc32, _swap, -fixupAddress);
+ *loc32 = -fixupAddress;
return;
case addOffset12:
llvm_unreachable("lazy reference kind implies GOT pass was run");
Modified: lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86.cpp?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86.cpp (original)
+++ lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86.cpp Mon Oct 27 17:48:35 2014
@@ -13,6 +13,8 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm::MachO;
@@ -21,6 +23,12 @@ using namespace lld::mach_o::normalized;
namespace lld {
namespace mach_o {
+using llvm::support::ulittle16_t;
+using llvm::support::ulittle32_t;
+
+using llvm::support::little16_t;
+using llvm::support::little32_t;
+
class ArchHandler_x86 : public ArchHandler {
public:
ArchHandler_x86();
@@ -156,16 +164,13 @@ private:
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress);
-
- const bool _swap;
};
//===----------------------------------------------------------------------===//
// ArchHandler_x86
//===----------------------------------------------------------------------===//
-ArchHandler_x86::ArchHandler_x86() :
- _swap(!MachOLinkingContext::isHostEndian(MachOLinkingContext::arch_x86)) {}
+ArchHandler_x86::ArchHandler_x86() {}
ArchHandler_x86::~ArchHandler_x86() { }
@@ -257,18 +262,18 @@ ArchHandler_x86::getReferenceInfo(const
*kind = branch32;
if (E ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = fixupAddress + 4 + readS32(swap, fixupContent);
+ *addend = fixupAddress + 4 + *(little32_t *)fixupContent;
break;
case GENERIC_RELOC_VANILLA | rPcRel | rLength4:
// ex: call _foo (and _foo defined)
*kind = branch32;
- targetAddress = fixupAddress + 4 + readS32(swap, fixupContent);
+ targetAddress = fixupAddress + 4 + *(little32_t *)fixupContent;
return atomFromAddress(reloc.symbol, targetAddress, target, addend);
break;
case GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength4:
// ex: call _foo+n (and _foo defined)
*kind = branch32;
- targetAddress = fixupAddress + 4 + readS32(swap, fixupContent);
+ targetAddress = fixupAddress + 4 + *(little32_t *)fixupContent;
if (E ec = atomFromAddress(0, reloc.value, target, addend))
return ec;
*addend = targetAddress - reloc.value;
@@ -278,18 +283,18 @@ ArchHandler_x86::getReferenceInfo(const
*kind = branch16;
if (E ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = fixupAddress + 2 + readS16(swap, fixupContent);
+ *addend = fixupAddress + 2 + *(little16_t *)fixupContent;
break;
case GENERIC_RELOC_VANILLA | rPcRel | rLength2:
// ex: callw _foo (and _foo defined)
*kind = branch16;
- targetAddress = fixupAddress + 2 + readS16(swap, fixupContent);
+ targetAddress = fixupAddress + 2 + *(little16_t *)fixupContent;
return atomFromAddress(reloc.symbol, targetAddress, target, addend);
break;
case GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength2:
// ex: callw _foo+n (and _foo defined)
*kind = branch16;
- targetAddress = fixupAddress + 2 + readS16(swap, fixupContent);
+ targetAddress = fixupAddress + 2 + *(little16_t *)fixupContent;
if (E ec = atomFromAddress(0, reloc.value, target, addend))
return ec;
*addend = targetAddress - reloc.value;
@@ -303,7 +308,7 @@ ArchHandler_x86::getReferenceInfo(const
: pointer32;
if (E ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = readU32(swap, fixupContent);
+ *addend = *(ulittle32_t *)fixupContent;
break;
case GENERIC_RELOC_VANILLA | rLength4:
// ex: movl _foo, %eax (and _foo defined)
@@ -312,7 +317,7 @@ ArchHandler_x86::getReferenceInfo(const
*kind =
((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X) ? abs32
: pointer32;
- targetAddress = readU32(swap, fixupContent);
+ targetAddress = *(ulittle32_t *)fixupContent;
return atomFromAddress(reloc.symbol, targetAddress, target, addend);
break;
case GENERIC_RELOC_VANILLA | rScattered | rLength4:
@@ -323,7 +328,7 @@ ArchHandler_x86::getReferenceInfo(const
: pointer32;
if (E ec = atomFromAddress(0, reloc.value, target, addend))
return ec;
- *addend = readU32(swap, fixupContent) - reloc.value;
+ *addend = *(ulittle32_t *)fixupContent - reloc.value;
break;
default:
return make_dynamic_error_code(Twine("unsupported i386 relocation type"));
@@ -359,7 +364,7 @@ ArchHandler_x86::getPairReferenceInfo(co
GENERIC_RELOC_PAIR | rScattered | rLength4):
toAddress = reloc1.value;
fromAddress = reloc2.value;
- value = readS32(swap, fixupContent);
+ value = *(little32_t *)fixupContent;
ec = atomFromAddr(0, toAddress, target, &offsetInTo);
if (ec)
return ec;
@@ -425,34 +430,33 @@ void ArchHandler_x86::generateAtomConten
}
}
-void ArchHandler_x86::applyFixupFinal(const Reference &ref, uint8_t *location,
+void ArchHandler_x86::applyFixupFinal(const Reference &ref, uint8_t *loc,
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress) {
if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
return;
assert(ref.kindArch() == Reference::KindArch::x86);
- int32_t *loc32 = reinterpret_cast<int32_t *>(location);
- int16_t *loc16 = reinterpret_cast<int16_t *>(location);
+ ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
switch (ref.kindValue()) {
case branch32:
- write32(*loc32, _swap, (targetAddress - (fixupAddress + 4)) + ref.addend());
+ *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
break;
case branch16:
- write16(*loc16, _swap, (targetAddress - (fixupAddress + 2)) + ref.addend());
+ *loc32 = (targetAddress - (fixupAddress + 2)) + ref.addend();
break;
case pointer32:
case abs32:
- write32(*loc32, _swap, targetAddress + ref.addend());
+ *loc32 = targetAddress + ref.addend();
break;
case funcRel32:
- write32(*loc32, _swap, targetAddress - inAtomAddress + ref.addend());
+ *loc32 = targetAddress - inAtomAddress + ref.addend();
break;
case delta32:
- write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend());
+ *loc32 = targetAddress - fixupAddress + ref.addend();
break;
case negDelta32:
- write32(*loc32, _swap, fixupAddress - targetAddress + ref.addend());
+ *loc32 = fixupAddress - targetAddress + ref.addend();
break;
case modeCode:
case modeData:
@@ -467,38 +471,38 @@ void ArchHandler_x86::applyFixupFinal(co
}
void ArchHandler_x86::applyFixupRelocatable(const Reference &ref,
- uint8_t *location,
+ uint8_t *loc,
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress) {
- int32_t *loc32 = reinterpret_cast<int32_t *>(location);
- int16_t *loc16 = reinterpret_cast<int16_t *>(location);
bool useExternalReloc = useExternalRelocationTo(*ref.target());
+ ulittle16_t *loc16 = reinterpret_cast<ulittle16_t *>(loc);
+ ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
switch (ref.kindValue()) {
case branch32:
if (useExternalReloc)
- write32(*loc32, _swap, ref.addend() - (fixupAddress + 4));
+ *loc32 = ref.addend() - (fixupAddress + 4);
else
- write32(*loc32, _swap, (targetAddress - (fixupAddress+4)) + ref.addend());
+ *loc32 =(targetAddress - (fixupAddress+4)) + ref.addend();
break;
case branch16:
if (useExternalReloc)
- write16(*loc16, _swap, ref.addend() - (fixupAddress + 2));
+ *loc16 = ref.addend() - (fixupAddress + 2);
else
- write16(*loc16, _swap, (targetAddress - (fixupAddress+2)) + ref.addend());
+ *loc16 = (targetAddress - (fixupAddress+2)) + ref.addend();
break;
case pointer32:
case abs32:
- write32(*loc32, _swap, targetAddress + ref.addend());
+ *loc32 = targetAddress + ref.addend();
break;
case funcRel32:
- write32(*loc32, _swap, targetAddress - inAtomAddress + ref.addend()); // FIXME
+ *loc32 = targetAddress - inAtomAddress + ref.addend(); // FIXME
break;
case delta32:
- write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend());
+ *loc32 = targetAddress - fixupAddress + ref.addend();
break;
case negDelta32:
- write32(*loc32, _swap, fixupAddress - targetAddress + ref.addend());
+ *loc32 = fixupAddress - targetAddress + ref.addend();
break;
case modeCode:
case modeData:
Modified: lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp (original)
+++ lld/trunk/lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp Mon Oct 27 17:48:35 2014
@@ -13,6 +13,8 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
+
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm::MachO;
@@ -21,6 +23,12 @@ using namespace lld::mach_o::normalized;
namespace lld {
namespace mach_o {
+using llvm::support::ulittle32_t;
+using llvm::support::ulittle64_t;
+
+using llvm::support::little32_t;
+using llvm::support::little64_t;
+
class ArchHandler_x86_64 : public ArchHandler {
public:
ArchHandler_x86_64();
@@ -200,13 +208,10 @@ private:
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress);
-
- const bool _swap;
};
-ArchHandler_x86_64::ArchHandler_x86_64() :
- _swap(!MachOLinkingContext::isHostEndian(MachOLinkingContext::arch_x86_64)) {}
+ArchHandler_x86_64::ArchHandler_x86_64() { }
ArchHandler_x86_64::~ArchHandler_x86_64() { }
@@ -335,25 +340,25 @@ ArchHandler_x86_64::getReferenceInfo(con
case ripRel32:
if (E ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = readS32(swap, fixupContent);
+ *addend = *(little32_t *)fixupContent;
return std::error_code();
case ripRel32Minus1:
if (E ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = readS32(swap, fixupContent) + 1;
+ *addend = *(little32_t *)fixupContent + 1;
return std::error_code();
case ripRel32Minus2:
if (E ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = readS32(swap, fixupContent) + 2;
+ *addend = *(little32_t *)fixupContent + 2;
return std::error_code();
case ripRel32Minus4:
if (E ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = readS32(swap, fixupContent) + 4;
+ *addend = *(little32_t *)fixupContent + 4;
return std::error_code();
case ripRel32Anon:
- targetAddress = fixupAddress + 4 + readS32(swap, fixupContent);
+ targetAddress = fixupAddress + 4 + *(little32_t *)fixupContent;
return atomFromAddress(reloc.symbol, targetAddress, target, addend);
case ripRel32GotLoad:
case ripRel32Got:
@@ -364,10 +369,10 @@ ArchHandler_x86_64::getReferenceInfo(con
case pointer64:
if (E ec = atomFromSymbolIndex(reloc.symbol, target))
return ec;
- *addend = readS64(swap, fixupContent);
+ *addend = *(little64_t *)fixupContent;
return std::error_code();
case pointer64Anon:
- targetAddress = readS64(swap, fixupContent);
+ targetAddress = *(little64_t *)fixupContent;
return atomFromAddress(reloc.symbol, targetAddress, target, addend);
default:
llvm_unreachable("bad reloc kind");
@@ -422,18 +427,18 @@ ArchHandler_x86_64::getPairReferenceInfo
case delta64:
if (E ec = atomFromSymbolIndex(reloc2.symbol, target))
return ec;
- *addend = readS64(swap, fixupContent) + offsetInAtom;
+ *addend = *(little64_t *)fixupContent + offsetInAtom;
return std::error_code();
case delta32:
if (E ec = atomFromSymbolIndex(reloc2.symbol, target))
return ec;
- *addend = readS32(swap, fixupContent) + offsetInAtom;
+ *addend = *(little32_t *)fixupContent + offsetInAtom;
return std::error_code();
case delta64Anon:
- targetAddress = offsetInAtom + readS64(swap, fixupContent);
+ targetAddress = offsetInAtom + *(little64_t *)fixupContent;
return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
case delta32Anon:
- targetAddress = offsetInAtom + readS32(swap, fixupContent);
+ targetAddress = offsetInAtom + *(little32_t *)fixupContent;
return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
default:
llvm_unreachable("bad reloc pair kind");
@@ -468,52 +473,52 @@ void ArchHandler_x86_64::generateAtomCon
}
void ArchHandler_x86_64::applyFixupFinal(
- const Reference &ref, uint8_t *location, uint64_t fixupAddress,
+ const Reference &ref, uint8_t *loc, uint64_t fixupAddress,
uint64_t targetAddress, uint64_t inAtomAddress, uint64_t imageBaseAddress,
FindAddressForAtom findSectionAddress) {
+ ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
+ ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
return;
assert(ref.kindArch() == Reference::KindArch::x86_64);
- int32_t *loc32 = reinterpret_cast<int32_t *>(location);
- uint64_t *loc64 = reinterpret_cast<uint64_t *>(location);
switch (static_cast<X86_64_Kinds>(ref.kindValue())) {
case branch32:
case ripRel32:
case ripRel32Anon:
case ripRel32Got:
case ripRel32GotLoad:
- write32(*loc32, _swap, (targetAddress - (fixupAddress + 4)) + ref.addend());
+ *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
return;
case pointer64:
case pointer64Anon:
- write64(*loc64, _swap, targetAddress + ref.addend());
+ *loc64 = targetAddress + ref.addend();
return;
case ripRel32Minus1:
- write32(*loc32, _swap, (targetAddress - (fixupAddress + 5)) + ref.addend());
+ *loc32 = targetAddress - (fixupAddress + 5) + ref.addend();
return;
case ripRel32Minus2:
- write32(*loc32, _swap, (targetAddress - (fixupAddress + 6)) + ref.addend());
+ *loc32 = targetAddress - (fixupAddress + 6) + ref.addend();
return;
case ripRel32Minus4:
- write32(*loc32, _swap, (targetAddress - (fixupAddress + 8)) + ref.addend());
+ *loc32 = targetAddress - (fixupAddress + 8) + ref.addend();
return;
case delta32:
case delta32Anon:
- write32(*loc32, _swap, (targetAddress - fixupAddress) + ref.addend());
+ *loc32 = targetAddress - fixupAddress + ref.addend();
return;
case delta64:
case delta64Anon:
case unwindFDEToFunction:
- write64(*loc64, _swap, (targetAddress - fixupAddress) + ref.addend());
+ *loc64 = targetAddress - fixupAddress + ref.addend();
return;
case ripRel32GotLoadNowLea:
// Change MOVQ to LEA
- assert(location[-2] == 0x8B);
- location[-2] = 0x8D;
- write32(*loc32, _swap, (targetAddress - (fixupAddress + 4)) + ref.addend());
+ assert(loc[-2] == 0x8B);
+ loc[-2] = 0x8D;
+ *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
return;
case negDelta32:
- write32(*loc32, _swap, fixupAddress - targetAddress + ref.addend());
+ *loc32 = fixupAddress - targetAddress + ref.addend();
return;
case lazyPointer:
case lazyImmediateLocation:
@@ -521,13 +526,12 @@ void ArchHandler_x86_64::applyFixupFinal
return;
case imageOffset:
case imageOffsetGot:
- write32(*loc32, _swap, (targetAddress - imageBaseAddress) + ref.addend());
+ *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
return;
case unwindInfoToEhFrame: {
uint64_t val = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
assert(val < 0xffffffU && "offset in __eh_frame too large");
- uint32_t encoding = read32(_swap, *loc32) & 0xff000000U;
- write32(*loc32, _swap, encoding | val);
+ *loc32 = (*loc32 & 0xff000000U) | val;
return;
}
case invalid:
@@ -539,51 +543,51 @@ void ArchHandler_x86_64::applyFixupFinal
void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref,
- uint8_t *location,
+ uint8_t *loc,
uint64_t fixupAddress,
uint64_t targetAddress,
uint64_t inAtomAddress) {
- int32_t *loc32 = reinterpret_cast<int32_t *>(location);
- uint64_t *loc64 = reinterpret_cast<uint64_t *>(location);
+ ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
+ ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
switch (static_cast<X86_64_Kinds>(ref.kindValue())) {
case branch32:
case ripRel32:
case ripRel32Got:
case ripRel32GotLoad:
- write32(*loc32, _swap, ref.addend());
+ *loc32 = ref.addend();
return;
case ripRel32Anon:
- write32(*loc32, _swap, (targetAddress - (fixupAddress + 4)) + ref.addend());
+ *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
return;
case pointer64:
- write64(*loc64, _swap, ref.addend());
+ *loc64 = ref.addend();
return;
case pointer64Anon:
- write64(*loc64, _swap, targetAddress + ref.addend());
+ *loc64 = targetAddress + ref.addend();
return;
case ripRel32Minus1:
- write32(*loc32, _swap, ref.addend() - 1);
+ *loc32 = ref.addend() - 1;
return;
case ripRel32Minus2:
- write32(*loc32, _swap, ref.addend() - 2);
+ *loc32 = ref.addend() - 2;
return;
case ripRel32Minus4:
- write32(*loc32, _swap, ref.addend() - 4);
+ *loc32 = ref.addend() - 4;
return;
case delta32:
- write32(*loc32, _swap, ref.addend() + inAtomAddress - fixupAddress);
+ *loc32 = ref.addend() + inAtomAddress - fixupAddress;
return;
case delta32Anon:
- write32(*loc32, _swap, (targetAddress - fixupAddress) + ref.addend());
+ *loc32 = (targetAddress - fixupAddress) + ref.addend();
return;
case delta64:
- write64(*loc64, _swap, ref.addend() + inAtomAddress - fixupAddress);
+ *loc64 = ref.addend() + inAtomAddress - fixupAddress;
return;
case delta64Anon:
- write64(*loc64, _swap, (targetAddress - fixupAddress) + ref.addend());
+ *loc64 = (targetAddress - fixupAddress) + ref.addend();
return;
case negDelta32:
- write32(*loc32, _swap, fixupAddress - targetAddress + ref.addend());
+ *loc32 = fixupAddress - targetAddress + ref.addend();
return;
case ripRel32GotLoadNowLea:
llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
Modified: lld/trunk/lib/ReaderWriter/MachO/CompactUnwindPass.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/CompactUnwindPass.cpp?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/CompactUnwindPass.cpp (original)
+++ lld/trunk/lib/ReaderWriter/MachO/CompactUnwindPass.cpp Mon Oct 27 17:48:35 2014
@@ -66,7 +66,7 @@ struct UnwindInfoPage {
class UnwindInfoAtom : public SimpleDefinedAtom {
public:
- UnwindInfoAtom(ArchHandler &archHandler, const File &file, bool swap,
+ UnwindInfoAtom(ArchHandler &archHandler, const File &file, bool isBig,
std::vector<uint32_t> commonEncodings,
std::vector<const Atom *> personalities,
std::vector<UnwindInfoPage> pages, uint32_t numLSDAs)
@@ -79,7 +79,7 @@ public:
_lsdaIndexOffset(_topLevelIndexOffset +
3 * (pages.size() + 1) * sizeof(uint32_t)),
_firstPageOffset(_lsdaIndexOffset + 2 * numLSDAs * sizeof(uint32_t)),
- _swap(swap) {
+ _isBig(isBig) {
addHeader(commonEncodings.size(), personalities.size(), pages.size());
addCommonEncodings(commonEncodings);
@@ -110,21 +110,22 @@ public:
uint32_t headerSize = 7 * sizeof(uint32_t);
_contents.resize(headerSize);
- int32_t *headerEntries = (int32_t *)_contents.data();
+ uint8_t *headerEntries = _contents.data();
// version
- write32(headerEntries[0], _swap, 1);
+ write32(headerEntries, 1, _isBig);
// commonEncodingsArraySectionOffset
- write32(headerEntries[1], _swap, _commonEncodingsOffset);
+ write32(headerEntries + sizeof(uint32_t), _commonEncodingsOffset, _isBig);
// commonEncodingsArrayCount
- write32(headerEntries[2], _swap, numCommon);
+ write32(headerEntries + 2 * sizeof(uint32_t), numCommon, _isBig);
// personalityArraySectionOffset
- write32(headerEntries[3], _swap, _personalityArrayOffset);
+ write32(headerEntries + 3 * sizeof(uint32_t), _personalityArrayOffset,
+ _isBig);
// personalityArrayCount
- write32(headerEntries[4], _swap, numPersonalities);
+ write32(headerEntries + 4 * sizeof(uint32_t), numPersonalities, _isBig);
// indexSectionOffset
- write32(headerEntries[5], _swap, _topLevelIndexOffset);
+ write32(headerEntries + 5 * sizeof(uint32_t), _topLevelIndexOffset, _isBig);
// indexCount
- write32(headerEntries[6], _swap, numPages + 1);
+ write32(headerEntries + 6 * sizeof(uint32_t), numPages + 1, _isBig);
}
/// Add the list of common encodings to the section; this is simply an array
@@ -134,11 +135,13 @@ public:
_contents.resize(_commonEncodingsOffset +
commonEncodings.size() * sizeof(uint32_t));
- int32_t *commonEncodingsArea =
- reinterpret_cast<int32_t *>(_contents.data() + _commonEncodingsOffset);
+ uint8_t *commonEncodingsArea =
+ reinterpret_cast<uint8_t *>(_contents.data() + _commonEncodingsOffset);
- for (uint32_t encoding : commonEncodings)
- write32(*commonEncodingsArea++, _swap, encoding);
+ for (uint32_t encoding : commonEncodings) {
+ write32(commonEncodingsArea, encoding, _isBig);
+ commonEncodingsArea += sizeof(uint32_t);
+ }
}
void addPersonalityFunctions(std::vector<const Atom *> personalities) {
@@ -160,16 +163,16 @@ public:
// The most difficult job here is calculating the LSDAs; everything else
// follows fairly naturally, but we can't state where the first
- int32_t *indexData = (int32_t *)&_contents[_topLevelIndexOffset];
+ uint8_t *indexData = &_contents[_topLevelIndexOffset];
uint32_t numLSDAs = 0;
for (unsigned i = 0; i < pages.size(); ++i) {
// functionOffset
addImageReference(_topLevelIndexOffset + 3 * i * sizeof(uint32_t),
pages[i].entries[0].rangeStart);
// secondLevelPagesSectionOffset
- write32(indexData[3 * i + 1], _swap, pageLoc);
- write32(indexData[3 * i + 2], _swap,
- _lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t));
+ write32(indexData + (3 * i + 1) * sizeof(uint32_t), pageLoc, _isBig);
+ write32(indexData + (3 * i + 2) * sizeof(uint32_t),
+ _lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t), _isBig);
for (auto &entry : pages[i].entries)
if (entry.lsdaLocation)
@@ -182,8 +185,8 @@ public:
3 * pages.size() * sizeof(uint32_t),
finalEntry.rangeStart, finalEntry.rangeLength);
// secondLevelPagesSectionOffset => 0
- indexData[3 * pages.size() + 2] =
- _lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t);
+ write32(indexData + (3 * pages.size() + 2) * sizeof(uint32_t),
+ _lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t), _isBig);
}
void addLSDAIndexes(std::vector<UnwindInfoPage> &pages, uint32_t numLSDAs) {
@@ -218,18 +221,17 @@ public:
using normalized::write32;
using normalized::write16;
// 2 => regular page
- write32(*(int32_t *)&_contents[curPageOffset], _swap, 2);
+ write32(&_contents[curPageOffset], 2, _isBig);
// offset of 1st entry
- write16(*(int16_t *)&_contents[curPageOffset + 4], _swap, headerSize);
- write16(*(int16_t *)&_contents[curPageOffset + 6], _swap,
- page.entries.size());
+ write16(&_contents[curPageOffset + 4], headerSize, _isBig);
+ write16(&_contents[curPageOffset + 6], page.entries.size(), _isBig);
uint32_t pagePos = curPageOffset + headerSize;
for (auto &entry : page.entries) {
addImageReference(pagePos, entry.rangeStart);
- write32(reinterpret_cast<int32_t *>(_contents.data() + pagePos)[1], _swap,
- entry.encoding);
+ write32(_contents.data() + pagePos + sizeof(uint32_t), entry.encoding,
+ _isBig);
if ((entry.encoding & 0x0f000000U) ==
_archHandler.dwarfCompactUnwindType())
addEhFrameReference(pagePos + sizeof(uint32_t), entry.ehFrame);
@@ -263,7 +265,7 @@ private:
uint32_t _topLevelIndexOffset;
uint32_t _lsdaIndexOffset;
uint32_t _firstPageOffset;
- bool _swap;
+ bool _isBig;
};
/// Pass for instantiating and optimizing GOT slots.
@@ -273,7 +275,7 @@ public:
CompactUnwindPass(const MachOLinkingContext &context)
: _context(context), _archHandler(_context.archHandler()),
_file("<mach-o Compact Unwind Pass>"),
- _swap(!MachOLinkingContext::isHostEndian(_context.arch())) {}
+ _isBig(MachOLinkingContext::isBigEndian(_context.arch())) {}
private:
void perform(std::unique_ptr<MutableFile> &mergedFile) override {
@@ -337,7 +339,7 @@ private:
// FIXME: we should also erase all compact-unwind atoms; their job is done.
UnwindInfoAtom *unwind = new (_file.allocator())
- UnwindInfoAtom(_archHandler, _file, _swap, std::vector<uint32_t>(),
+ UnwindInfoAtom(_archHandler, _file, _isBig, std::vector<uint32_t>(),
personalities, pages, numLSDAs);
mergedFile->addAtom(*unwind);
}
@@ -404,8 +406,9 @@ private:
using normalized::read32;
entry.rangeLength =
- read32(_swap, ((uint32_t *)atom->rawContent().data())[2]);
- entry.encoding = read32(_swap, ((uint32_t *)atom->rawContent().data())[3]);
+ read32(atom->rawContent().data() + 2 * sizeof(uint32_t), _isBig);
+ entry.encoding =
+ read32(atom->rawContent().data() + 3 * sizeof(uint32_t), _isBig);
return entry;
}
@@ -415,7 +418,7 @@ private:
for (const DefinedAtom *ehFrameAtom : mergedFile->defined()) {
if (ehFrameAtom->contentType() != DefinedAtom::typeCFI)
continue;
- if (ArchHandler::isDwarfCIE(_swap, ehFrameAtom))
+ if (ArchHandler::isDwarfCIE(_isBig, ehFrameAtom))
continue;
if (const Atom *function = _archHandler.fdeTargetFunction(ehFrameAtom))
@@ -506,7 +509,7 @@ private:
const MachOLinkingContext &_context;
mach_o::ArchHandler &_archHandler;
MachOFile _file;
- bool _swap;
+ bool _isBig;
};
void addCompactUnwindPass(PassManager &pm, const MachOLinkingContext &ctx) {
Modified: lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryReader.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryReader.cpp?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryReader.cpp (original)
+++ lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryReader.cpp Mon Oct 27 17:48:35 2014
@@ -53,14 +53,14 @@ namespace normalized {
// Utility to call a lambda expression on each load command.
static std::error_code forEachLoadCommand(
- StringRef lcRange, unsigned lcCount, bool swap, bool is64,
+ StringRef lcRange, unsigned lcCount, bool isBig, bool is64,
std::function<bool(uint32_t cmd, uint32_t size, const char *lc)> func) {
const char* p = lcRange.begin();
for (unsigned i=0; i < lcCount; ++i) {
const load_command *lc = reinterpret_cast<const load_command*>(p);
load_command lcCopy;
const load_command *slc = lc;
- if (swap) {
+ if (isBig != llvm::sys::IsBigEndianHost) {
memcpy(&lcCopy, lc, sizeof(load_command));
swapStruct(lcCopy);
slc = &lcCopy;
@@ -78,7 +78,7 @@ static std::error_code forEachLoadComman
}
static std::error_code appendRelocations(Relocations &relocs, StringRef buffer,
- bool swap, bool bigEndian,
+ bool bigEndian,
uint32_t reloff, uint32_t nreloc) {
if ((reloff + nreloc*8) > buffer.size())
return make_error_code(llvm::errc::executable_format_error);
@@ -86,24 +86,24 @@ static std::error_code appendRelocations
reinterpret_cast<const any_relocation_info*>(buffer.begin()+reloff);
for(uint32_t i=0; i < nreloc; ++i) {
- relocs.push_back(unpackRelocation(relocsArray[i], swap, bigEndian));
+ relocs.push_back(unpackRelocation(relocsArray[i], bigEndian));
}
return std::error_code();
}
static std::error_code
-appendIndirectSymbols(IndirectSymbols &isyms, StringRef buffer, bool swap,
- bool bigEndian, uint32_t istOffset, uint32_t istCount,
+appendIndirectSymbols(IndirectSymbols &isyms, StringRef buffer, bool isBig,
+ uint32_t istOffset, uint32_t istCount,
uint32_t startIndex, uint32_t count) {
if ((istOffset + istCount*4) > buffer.size())
return make_error_code(llvm::errc::executable_format_error);
if (startIndex+count > istCount)
return make_error_code(llvm::errc::executable_format_error);
- const uint32_t *indirectSymbolArray =
- reinterpret_cast<const uint32_t*>(buffer.begin()+istOffset);
+ const uint8_t *indirectSymbolArray = (const uint8_t *)buffer.data();
for(uint32_t i=0; i < count; ++i) {
- isyms.push_back(read32(swap, indirectSymbolArray[startIndex+i]));
+ isyms.push_back(read32(
+ indirectSymbolArray + (startIndex + i) * sizeof(uint32_t), isBig));
}
return std::error_code();
}
@@ -116,23 +116,23 @@ template <typename T> static T readBigEn
}
-static bool isMachOHeader(const mach_header *mh, bool &is64, bool &swap) {
- switch (mh->magic) {
+static bool isMachOHeader(const mach_header *mh, bool &is64, bool &isBig) {
+ switch (read32(&mh->magic, false)) {
case llvm::MachO::MH_MAGIC:
is64 = false;
- swap = false;
+ isBig = false;
return true;
case llvm::MachO::MH_MAGIC_64:
is64 = true;
- swap = false;
+ isBig = false;
return true;
case llvm::MachO::MH_CIGAM:
is64 = false;
- swap = true;
+ isBig = true;
return true;
case llvm::MachO::MH_CIGAM_64:
is64 = true;
- swap = true;
+ isBig = true;
return true;
default:
return false;
@@ -154,17 +154,18 @@ bool isThinObjectFile(StringRef path, Ma
// If file buffer does not start with MH_MAGIC (and variants), not obj file.
const mach_header *mh = reinterpret_cast<const mach_header *>(
fileBuffer.begin());
- bool is64, swap;
- if (!isMachOHeader(mh, is64, swap))
+ bool is64, isBig;
+ if (!isMachOHeader(mh, is64, isBig))
return false;
// If not MH_OBJECT, not object file.
- if (read32(swap, mh->filetype) != MH_OBJECT)
+ if (read32(&mh->filetype, isBig) != MH_OBJECT)
return false;
// Lookup up arch from cpu/subtype pair.
- arch = MachOLinkingContext::archFromCpuType(read32(swap, mh->cputype),
- read32(swap, mh->cpusubtype));
+ arch = MachOLinkingContext::archFromCpuType(
+ read32(&mh->cputype, isBig),
+ read32(&mh->cpusubtype, isBig));
return true;
}
@@ -217,14 +218,14 @@ readBinary(std::unique_ptr<MemoryBuffer>
}
// Determine endianness and pointer size for mach-o file.
- bool is64, swap;
- if (!isMachOHeader(mh, is64, swap))
+ bool is64, isBig;
+ if (!isMachOHeader(mh, is64, isBig))
return make_error_code(llvm::errc::executable_format_error);
// Endian swap header, if needed.
mach_header headerCopy;
const mach_header *smh = mh;
- if (swap) {
+ if (isBig != llvm::sys::IsBigEndianHost) {
memcpy(&headerCopy, mh, sizeof(mach_header));
swapStruct(headerCopy);
smh = &headerCopy;
@@ -247,7 +248,6 @@ readBinary(std::unique_ptr<MemoryBuffer>
+ MachOLinkingContext::nameFromArch(f->arch)
+ ")" ));
}
- bool isBigEndianArch = MachOLinkingContext::isBigEndian(f->arch);
// Copy file type and flags
f->fileType = HeaderFileType(smh->filetype);
f->flags = smh->flags;
@@ -256,13 +256,13 @@ readBinary(std::unique_ptr<MemoryBuffer>
// Pre-scan load commands looking for indirect symbol table.
uint32_t indirectSymbolTableOffset = 0;
uint32_t indirectSymbolTableCount = 0;
- std::error_code ec = forEachLoadCommand(lcRange, lcCount, swap, is64,
+ std::error_code ec = forEachLoadCommand(lcRange, lcCount, isBig, is64,
[&](uint32_t cmd, uint32_t size,
const char *lc) -> bool {
if (cmd == LC_DYSYMTAB) {
const dysymtab_command *d = reinterpret_cast<const dysymtab_command*>(lc);
- indirectSymbolTableOffset = read32(swap, d->indirectsymoff);
- indirectSymbolTableCount = read32(swap, d->nindirectsyms);
+ indirectSymbolTableOffset = read32(&d->indirectsymoff, isBig);
+ indirectSymbolTableCount = read32(&d->nindirectsyms, isBig);
return true;
}
return false;
@@ -274,16 +274,14 @@ readBinary(std::unique_ptr<MemoryBuffer>
const data_in_code_entry *dataInCode = nullptr;
const dyld_info_command *dyldInfo = nullptr;
uint32_t dataInCodeSize = 0;
- ec = forEachLoadCommand(lcRange, lcCount, swap, is64,
+ ec = forEachLoadCommand(lcRange, lcCount, isBig, is64,
[&] (uint32_t cmd, uint32_t size, const char* lc) -> bool {
switch(cmd) {
case LC_SEGMENT_64:
if (is64) {
const segment_command_64 *seg =
reinterpret_cast<const segment_command_64*>(lc);
- const unsigned sectionCount = (swap
- ? llvm::sys::getSwappedBytes(seg->nsects)
- : seg->nsects);
+ const unsigned sectionCount = read32(&seg->nsects, isBig);
const section_64 *sects = reinterpret_cast<const section_64*>
(lc + sizeof(segment_command_64));
const unsigned lcSize = sizeof(segment_command_64)
@@ -296,26 +294,27 @@ readBinary(std::unique_ptr<MemoryBuffer>
Section section;
section.segmentName = getString16(sect->segname);
section.sectionName = getString16(sect->sectname);
- section.type = (SectionType)(read32(swap, sect->flags)
- & SECTION_TYPE);
- section.attributes = read32(swap, sect->flags) & SECTION_ATTRIBUTES;
- section.alignment = read32(swap, sect->align);
- section.address = read64(swap, sect->addr);
+ section.type = (SectionType)(read32(&sect->flags, isBig) &
+ SECTION_TYPE);
+ section.attributes = read32(&sect->flags, isBig) & SECTION_ATTRIBUTES;
+ section.alignment = read32(&sect->align, isBig);
+ section.address = read64(&sect->addr, isBig);
const uint8_t *content =
- (uint8_t *)start + read32(swap, sect->offset);
- size_t contentSize = read64(swap, sect->size);
+ (uint8_t *)start + read32(&sect->offset, isBig);
+ size_t contentSize = read64(&sect->size, isBig);
// Note: this assign() is copying the content bytes. Ideally,
// we can use a custom allocator for vector to avoid the copy.
section.content = llvm::makeArrayRef(content, contentSize);
- appendRelocations(section.relocations, mb->getBuffer(),
- swap, isBigEndianArch, read32(swap, sect->reloff),
- read32(swap, sect->nreloc));
+ appendRelocations(section.relocations, mb->getBuffer(), isBig,
+ read32(&sect->reloff, isBig),
+ read32(&sect->nreloc, isBig));
if (section.type == S_NON_LAZY_SYMBOL_POINTERS) {
appendIndirectSymbols(section.indirectSymbols, mb->getBuffer(),
- swap, isBigEndianArch,
+ isBig,
indirectSymbolTableOffset,
indirectSymbolTableCount,
- read32(swap, sect->reserved1), contentSize/4);
+ read32(&sect->reserved1, isBig),
+ contentSize/4);
}
f->sections.push_back(section);
}
@@ -325,9 +324,7 @@ readBinary(std::unique_ptr<MemoryBuffer>
if (!is64) {
const segment_command *seg =
reinterpret_cast<const segment_command*>(lc);
- const unsigned sectionCount = (swap
- ? llvm::sys::getSwappedBytes(seg->nsects)
- : seg->nsects);
+ const unsigned sectionCount = read32(&seg->nsects, isBig);
const section *sects = reinterpret_cast<const section*>
(lc + sizeof(segment_command));
const unsigned lcSize = sizeof(segment_command)
@@ -340,26 +337,26 @@ readBinary(std::unique_ptr<MemoryBuffer>
Section section;
section.segmentName = getString16(sect->segname);
section.sectionName = getString16(sect->sectname);
- section.type = (SectionType)(read32(swap, sect->flags)
- & SECTION_TYPE);
- section.attributes = read32(swap, sect->flags) & SECTION_ATTRIBUTES;
- section.alignment = read32(swap, sect->align);
- section.address = read32(swap, sect->addr);
+ section.type = (SectionType)(read32(&sect->flags, isBig) &
+ SECTION_TYPE);
+ section.attributes =
+ read32((uint8_t *)&sect->flags, isBig) & SECTION_ATTRIBUTES;
+ section.alignment = read32(&sect->align, isBig);
+ section.address = read32(&sect->addr, isBig);
const uint8_t *content =
- (uint8_t *)start + read32(swap, sect->offset);
- size_t contentSize = read32(swap, sect->size);
+ (uint8_t *)start + read32(&sect->offset, isBig);
+ size_t contentSize = read32(&sect->size, isBig);
// Note: this assign() is copying the content bytes. Ideally,
// we can use a custom allocator for vector to avoid the copy.
section.content = llvm::makeArrayRef(content, contentSize);
- appendRelocations(section.relocations, mb->getBuffer(),
- swap, isBigEndianArch, read32(swap, sect->reloff),
- read32(swap, sect->nreloc));
+ appendRelocations(section.relocations, mb->getBuffer(), isBig,
+ read32(&sect->reloff, isBig),
+ read32(&sect->nreloc, isBig));
if (section.type == S_NON_LAZY_SYMBOL_POINTERS) {
- appendIndirectSymbols(section.indirectSymbols, mb->getBuffer(),
- swap, isBigEndianArch,
- indirectSymbolTableOffset,
- indirectSymbolTableCount,
- read32(swap, sect->reserved1), contentSize/4);
+ appendIndirectSymbols(
+ section.indirectSymbols, mb->getBuffer(), isBig,
+ indirectSymbolTableOffset, indirectSymbolTableCount,
+ read32(&sect->reserved1, isBig), contentSize / 4);
}
f->sections.push_back(section);
}
@@ -367,15 +364,16 @@ readBinary(std::unique_ptr<MemoryBuffer>
break;
case LC_SYMTAB: {
const symtab_command *st = reinterpret_cast<const symtab_command*>(lc);
- const char *strings = start + read32(swap, st->stroff);
- const uint32_t strSize = read32(swap, st->strsize);
+ const char *strings = start + read32(&st->stroff, isBig);
+ const uint32_t strSize = read32(&st->strsize, isBig);
// Validate string pool and symbol table all in buffer.
- if ( read32(swap, st->stroff)+read32(swap, st->strsize)
- > objSize )
+ if (read32((uint8_t *)&st->stroff, isBig) +
+ read32((uint8_t *)&st->strsize, isBig) >
+ objSize)
return true;
if (is64) {
- const uint32_t symOffset = read32(swap, st->symoff);
- const uint32_t symCount = read32(swap, st->nsyms);
+ const uint32_t symOffset = read32(&st->symoff, isBig);
+ const uint32_t symCount = read32(&st->nsyms, isBig);
if ( symOffset+(symCount*sizeof(nlist_64)) > objSize)
return true;
const nlist_64 *symbols =
@@ -384,7 +382,7 @@ readBinary(std::unique_ptr<MemoryBuffer>
for(uint32_t i=0; i < symCount; ++i) {
const nlist_64 *sin = &symbols[i];
nlist_64 tempSym;
- if (swap) {
+ if (isBig != llvm::sys::IsBigEndianHost) {
tempSym = *sin; swapStruct(tempSym); sin = &tempSym;
}
Symbol sout;
@@ -404,8 +402,8 @@ readBinary(std::unique_ptr<MemoryBuffer>
f->localSymbols.push_back(sout);
}
} else {
- const uint32_t symOffset = read32(swap, st->symoff);
- const uint32_t symCount = read32(swap, st->nsyms);
+ const uint32_t symOffset = read32(&st->symoff, isBig);
+ const uint32_t symCount = read32(&st->nsyms, isBig);
if ( symOffset+(symCount*sizeof(nlist)) > objSize)
return true;
const nlist *symbols =
@@ -414,7 +412,7 @@ readBinary(std::unique_ptr<MemoryBuffer>
for(uint32_t i=0; i < symCount; ++i) {
const nlist *sin = &symbols[i];
nlist tempSym;
- if (swap) {
+ if (isBig != llvm::sys::IsBigEndianHost) {
tempSym = *sin; swapStruct(tempSym); sin = &tempSym;
}
Symbol sout;
@@ -438,15 +436,15 @@ readBinary(std::unique_ptr<MemoryBuffer>
break;
case LC_ID_DYLIB: {
const dylib_command *dl = reinterpret_cast<const dylib_command*>(lc);
- f->installName = lc + read32(swap, dl->dylib.name);
+ f->installName = lc + read32(&dl->dylib.name, isBig);
}
break;
case LC_DATA_IN_CODE: {
const linkedit_data_command *ldc =
reinterpret_cast<const linkedit_data_command*>(lc);
- dataInCode = reinterpret_cast<const data_in_code_entry*>(
- start + read32(swap, ldc->dataoff));
- dataInCodeSize = read32(swap, ldc->datasize);
+ dataInCode = reinterpret_cast<const data_in_code_entry *>(
+ start + read32(&ldc->dataoff, isBig));
+ dataInCodeSize = read32(&ldc->datasize, isBig);
}
break;
case LC_LOAD_DYLIB:
@@ -455,7 +453,7 @@ readBinary(std::unique_ptr<MemoryBuffer>
case LC_LOAD_UPWARD_DYLIB: {
const dylib_command *dl = reinterpret_cast<const dylib_command*>(lc);
DependentDylib entry;
- entry.path = lc + read32(swap, dl->dylib.name);
+ entry.path = lc + read32(&dl->dylib.name, isBig);
entry.kind = LoadCommandType(cmd);
f->dependentDylibs.push_back(entry);
}
@@ -474,9 +472,10 @@ readBinary(std::unique_ptr<MemoryBuffer>
// Convert on-disk data_in_code_entry array to DataInCode vector.
for (unsigned i=0; i < dataInCodeSize/sizeof(data_in_code_entry); ++i) {
DataInCode entry;
- entry.offset = read32(swap, dataInCode[i].offset);
- entry.length = read16(swap, dataInCode[i].length);
- entry.kind = (DataRegionType)read16(swap, dataInCode[i].kind);
+ entry.offset = read32(&dataInCode[i].offset, isBig);
+ entry.length = read16(&dataInCode[i].length, isBig);
+ entry.kind =
+ (DataRegionType)read16((uint8_t *)&dataInCode[i].kind, isBig);
f->dataInCode.push_back(entry);
}
}
Modified: lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryUtils.h
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryUtils.h?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryUtils.h (original)
+++ lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileBinaryUtils.h Mon Oct 27 17:48:35 2014
@@ -13,6 +13,7 @@
#include "lld/Core/LLVM.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MachO.h"
@@ -27,28 +28,54 @@ namespace normalized {
using llvm::sys::getSwappedBytes;
-inline uint16_t read16(bool swap, uint16_t value) {
- return (swap ? getSwappedBytes(value) : value);
-}
-
-inline uint32_t read32(bool swap, uint32_t value) {
- return (swap ? getSwappedBytes(value) : value);
-}
-
-inline uint64_t read64(bool swap, uint64_t value) {
- return (swap ? getSwappedBytes(value) : value);
-}
-
-inline void write16(int16_t &loc, bool swap, int16_t value) {
- loc = (swap ? getSwappedBytes(value) : value);
-}
-
-inline void write32(int32_t &loc, bool swap, int32_t value) {
- loc = (swap ? getSwappedBytes(value) : value);
-}
-
-inline void write64(uint64_t &loc, bool swap, uint64_t value) {
- loc = (swap ? getSwappedBytes(value) : value);
+ using llvm::support::ubig16_t;
+ using llvm::support::ubig32_t;
+ using llvm::support::ubig64_t;
+
+ using llvm::support::ulittle16_t;
+ using llvm::support::ulittle32_t;
+ using llvm::support::ulittle64_t;
+
+template<typename T>
+static inline uint16_t read16(const T *loc, bool isBig) {
+ assert((uint64_t)loc % llvm::alignOf<T>() == 0 &&
+ "invalid pointer alignment");
+ return isBig ? *(ubig16_t *)loc : *(ulittle16_t *)loc;
+}
+
+template<typename T>
+static inline uint32_t read32(const T *loc, bool isBig) {
+ assert((uint64_t)loc % llvm::alignOf<T>() == 0 &&
+ "invalid pointer alignment");
+ return isBig ? *(ubig32_t *)loc : *(ulittle32_t *)loc;
+}
+
+template<typename T>
+static inline uint64_t read64(const T *loc, bool isBig) {
+ assert((uint64_t)loc % llvm::alignOf<T>() == 0 &&
+ "invalid pointer alignment");
+ return isBig ? *(ubig64_t *)loc : *(ulittle64_t *)loc;
+}
+
+inline void write16(uint8_t *loc, uint16_t value, bool isBig) {
+ if (isBig)
+ *(ubig16_t *)loc = value;
+ else
+ *(ulittle16_t *)loc = value;
+}
+
+inline void write32(uint8_t *loc, uint32_t value, bool isBig) {
+ if (isBig)
+ *(ubig32_t *)loc = value;
+ else
+ *(ulittle32_t *)loc = value;
+}
+
+inline void write64(uint8_t *loc, uint64_t value, bool isBig) {
+ if (isBig)
+ *(ubig64_t *)loc = value;
+ else
+ *(ulittle64_t *)loc = value;
}
inline uint32_t
@@ -69,11 +96,10 @@ bitFieldSet(uint32_t &bits, bool isBigEn
bits |= (newBits << shift);
}
-inline Relocation
-unpackRelocation(const llvm::MachO::any_relocation_info &r, bool swap,
- bool isBigEndian) {
- uint32_t r0 = read32(swap, r.r_word0);
- uint32_t r1 = read32(swap, r.r_word1);
+inline Relocation unpackRelocation(const llvm::MachO::any_relocation_info &r,
+ bool isBigEndian) {
+ uint32_t r0 = read32(&r.r_word0, isBigEndian);
+ uint32_t r1 = read32(&r.r_word1, isBigEndian);
Relocation result;
if (r0 & llvm::MachO::R_SCATTERED) {
Modified: lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileToAtoms.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileToAtoms.cpp?rev=220730&r1=220729&r2=220730&view=diff
==============================================================================
--- lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileToAtoms.cpp (original)
+++ lld/trunk/lib/ReaderWriter/MachO/MachONormalizedFileToAtoms.cpp Mon Oct 27 17:48:35 2014
@@ -365,7 +365,7 @@ std::error_code processSection(DefinedAt
MachOFile &file, bool scatterable,
bool copyRefs) {
const bool is64 = MachOLinkingContext::is64Bit(normalizedFile.arch);
- const bool swap = !MachOLinkingContext::isHostEndian(normalizedFile.arch);
+ const bool isBig = MachOLinkingContext::isBigEndian(normalizedFile.arch);
// Get info on how to atomize section.
unsigned int sizeMultiple;
@@ -423,7 +423,7 @@ std::error_code processSection(DefinedAt
case atomizeCFI:
// Break section up into dwarf unwind CFIs (FDE or CIE).
cfi = reinterpret_cast<const uint32_t *>(&section.content[offset]);
- size = read32(swap, *cfi) + 4;
+ size = read32(&section.content[offset], isBig) + 4;
if (offset+size > section.content.size()) {
return make_dynamic_error_code(Twine(Twine("Section ")
+ section.segmentName
@@ -572,7 +572,7 @@ std::error_code convertRelocs(const Sect
}
};
- const bool swap = !MachOLinkingContext::isHostEndian(normalizedFile.arch);
+ const bool isBig = MachOLinkingContext::isBigEndian(normalizedFile.arch);
// Use old-school iterator so that paired relocations can be grouped.
for (auto it=section.relocations.begin(), e=section.relocations.end();
it != e; ++it) {
@@ -595,18 +595,16 @@ std::error_code convertRelocs(const Sect
std::error_code relocErr;
if (handler.isPairedReloc(reloc)) {
// Handle paired relocations together.
- relocErr = handler.getPairReferenceInfo(reloc, *++it, inAtom,
- offsetInAtom, fixupAddress, swap,
- scatterable, atomByAddr,
- atomBySymbol, &kind,
- &target, &addend);
+ relocErr = handler.getPairReferenceInfo(
+ reloc, *++it, inAtom, offsetInAtom, fixupAddress, isBig, scatterable,
+ atomByAddr, atomBySymbol, &kind, &target, &addend);
}
else {
// Use ArchHandler to convert relocation record into information
// needed to instantiate an lld::Reference object.
- relocErr = handler.getReferenceInfo(reloc, inAtom, offsetInAtom,
- fixupAddress,swap, atomByAddr,
- atomBySymbol, &kind, &target, &addend);
+ relocErr = handler.getReferenceInfo(
+ reloc, inAtom, offsetInAtom, fixupAddress, isBig, atomByAddr,
+ atomBySymbol, &kind, &target, &addend);
}
if (relocErr) {
return make_dynamic_error_code(
@@ -638,18 +636,18 @@ bool isDebugInfoSection(const Section &s
return section.segmentName.equals("__DWARF");
}
-static int64_t readSPtr(bool is64, bool swap, const uint8_t *addr) {
+static int64_t readSPtr(bool is64, bool isBig, const uint8_t *addr) {
if (is64)
- return read64(swap, *reinterpret_cast<const uint64_t *>(addr));
+ return read64(addr, isBig);
- int32_t res = read32(swap, *reinterpret_cast<const uint32_t *>(addr));
+ int32_t res = read32(addr, isBig);
return res;
}
std::error_code addEHFrameReferences(const NormalizedFile &normalizedFile,
MachOFile &file,
mach_o::ArchHandler &handler) {
- const bool swap = !MachOLinkingContext::isHostEndian(normalizedFile.arch);
+ const bool isBig = MachOLinkingContext::isBigEndian(normalizedFile.arch);
const bool is64 = MachOLinkingContext::is64Bit(normalizedFile.arch);
const Section *ehFrameSection = nullptr;
@@ -668,7 +666,7 @@ std::error_code addEHFrameReferences(con
[&](MachODefinedAtom *atom, uint64_t offset) -> void {
assert(atom->contentType() == DefinedAtom::typeCFI);
- if (ArchHandler::isDwarfCIE(swap, atom))
+ if (ArchHandler::isDwarfCIE(isBig, atom))
return;
// Compiler wasn't lazy and actually told us what it meant.
@@ -676,14 +674,14 @@ std::error_code addEHFrameReferences(con
return;
const uint8_t *frameData = atom->rawContent().data();
- uint32_t size = read32(swap, *(uint32_t *)frameData);
+ uint32_t size = read32(frameData, isBig);
uint64_t cieFieldInFDE = size == 0xffffffffU
? sizeof(uint32_t) + sizeof(uint64_t)
: sizeof(uint32_t);
// Linker needs to fixup a reference from the FDE to its parent CIE (a
// 32-bit byte offset backwards in the __eh_frame section).
- uint32_t cieDelta = read32(swap, *(uint32_t *)(frameData + cieFieldInFDE));
+ uint32_t cieDelta = read32(frameData + cieFieldInFDE, isBig);
uint64_t cieAddress = ehFrameSection->address + offset + cieFieldInFDE;
cieAddress -= cieDelta;
@@ -699,7 +697,7 @@ std::error_code addEHFrameReferences(con
// (hopefully)
uint64_t rangeFieldInFDE = cieFieldInFDE + sizeof(uint32_t);
- int64_t functionFromFDE = readSPtr(is64, swap, frameData + rangeFieldInFDE);
+ int64_t functionFromFDE = readSPtr(is64, isBig, frameData + rangeFieldInFDE);
uint64_t rangeStart = ehFrameSection->address + offset + rangeFieldInFDE;
rangeStart += functionFromFDE;
More information about the llvm-commits
mailing list