author    Dimitry Andric <dim@FreeBSD.org>    2014-11-24 17:02:24 +0000
committer Dimitry Andric <dim@FreeBSD.org>    2014-11-24 17:02:24 +0000
commit    91bc56ed825ba56b3cc264aa5c95ab84f86832ab (patch)
tree      4df130b28021d86e13bf4565ef58c1c5a5e093b4 /contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets
parent    9efc7e72bb1daf5d6019871d9c93a1c488a11229 (diff)
parent    5ca98fd98791947eba83a1ed3f2c8191ef7afa6c (diff)
Merge llvm 3.5.0 release from ^/vendor/llvm/dist, resolve conflicts, and
preserve our customizations, where necessary.
Notes: svn path=/projects/clang350-import/; revision=274968
Diffstat (limited to 'contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets')
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h  255
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h      154
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h     315
-rw-r--r--  contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h   132
4 files changed, 856 insertions, 0 deletions
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
new file mode 100644
index 000000000000..775ed9ec3635
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -0,0 +1,255 @@
+//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOAARCH64_H
+#define LLVM_RUNTIMEDYLDMACHOAARCH64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOAArch64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
+public:
+ RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 8; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ assert(!Obj.isRelocationScattered(RelInfo) &&
+ "Scattered relocations not supported on AArch64");
+
+ // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
+ // addend for the following relocation. If found: (1) store the associated
+ // addend, (2) consume the next relocation, and (3) use the stored addend to
+ // override the addend.
+ bool HasExplicitAddend = false;
+ int64_t ExplicitAddend = 0;
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
+ assert(!Obj.getPlainRelocationExternal(RelInfo));
+ assert(!Obj.getAnyRelocationPCRel(RelInfo));
+ assert(Obj.getAnyRelocationLength(RelInfo) == 2);
+ HasExplicitAddend = true;
+ int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
+ // Sign-extend the 24-bit addend to 64 bits.
+ ExplicitAddend = (RawAddend << 40) >> 40;
+ ++RelI;
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+ }
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ if (HasExplicitAddend) {
+ RE.Addend = ExplicitAddend;
+ Value.Addend = ExplicitAddend;
+ }
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ RE.Addend = Value.Addend;
+
+ if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+ RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ assert(!RE.IsPCRel && "PC-relative ARM64_RELOC_UNSIGNED not supported");
+ // Mask in the target value a byte at a time (we don't have an alignment
+ // guarantee for the target address, so this is safest).
+ if (RE.Size < 2)
+ llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
+
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ }
+ case MachO::ARM64_RELOC_BRANCH26: {
+ assert(RE.IsPCRel && "non-PC-relative ARM64_RELOC_BRANCH26 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend = *p & 0x03FFFFFF;
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("branch26 instruction has embedded addend.");
+ else
+ llvm_unreachable("branch26 instruction has embedded addend and"
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Check if branch is in range.
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ uint64_t PCRelVal = Value - FinalAddress + RE.Addend;
+ assert(isInt<26>(PCRelVal) && "Branch target out of range!");
+ // Insert the value into the instruction.
+ *p = (*p & 0xFC000000) | ((uint32_t)(PCRelVal >> 2) & 0x03FFFFFF);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ assert(RE.IsPCRel && "non-PC-relative ARM64_RELOC_PAGE21 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend =
+ ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("adrp instruction has embedded addend.");
+ else
+ llvm_unreachable("adrp instruction has embedded addend and"
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Adjust for PC-relative relocation and offset.
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ uint64_t PCRelVal =
+ ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+ // Check that the value fits into 21 bits (+ 12 lower bits).
+ assert(isInt<33>(PCRelVal) && "Invalid page reloc value!");
+ // Insert the value into the instruction.
+ uint32_t ImmLoValue = (uint32_t)(PCRelVal << 17) & 0x60000000;
+ uint32_t ImmHiValue = (uint32_t)(PCRelVal >> 9) & 0x00FFFFE0;
+ *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ assert(!RE.IsPCRel && "PC-relative ARM64_RELOC_PAGEOFF12 not supported");
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // Check if the addend is encoded in the instruction.
+ uint32_t EncodedAddend = *p & 0x003FFC00;
+ if (EncodedAddend != 0) {
+ if (RE.Addend == 0)
+ llvm_unreachable("adrp instruction has embedded addend.");
+ else
+ llvm_unreachable("adrp instruction has embedded addend and"
+ "ARM64_RELOC_ADDEND.");
+ }
+ // Add the offset from the symbol.
+ Value += RE.Addend;
+ // Mask out the page address and only use the lower 12 bits.
+ Value &= 0xFFF;
+ // Check which instruction we are updating to obtain the implicit shift
+ // factor from LDR/STR instructions.
+ if (*p & 0x08000000) {
+ uint32_t ImplicitShift = ((*p >> 30) & 0x3);
+ switch (ImplicitShift) {
+ case 0:
+ // Check if this a vector op.
+ if ((*p & 0x04800000) == 0x04800000) {
+ ImplicitShift = 4;
+ assert(((Value & 0xF) == 0) &&
+ "128-bit LDR/STR not 16-byte aligned.");
+ }
+ break;
+ case 1:
+ assert(((Value & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+ break;
+ case 2:
+ assert(((Value & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+ break;
+ case 3:
+ assert(((Value & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+ break;
+ }
+ // Compensate for implicit shift.
+ Value >>= ImplicitShift;
+ }
+ // Insert the value into the instruction.
+ *p = (*p & 0xFFC003FF) | ((uint32_t)(Value << 10) & 0x003FFC00);
+ break;
+ }
+ case MachO::ARM64_RELOC_SUBTRACTOR:
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ llvm_unreachable("Relocation type not implemented yet!");
+ case MachO::ARM64_RELOC_ADDEND:
+ llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
+ "processRelocationRef!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ assert(RE.Size == 2);
+ SectionEntry &Section = Sections[RE.SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end())
+ Addr = Section.Address + i->second;
+ else {
+ // FIXME: There must be a better way to do this than to check and fix the
+ // alignment every time!
+ uintptr_t BaseAddress = uintptr_t(Section.Address);
+ uintptr_t StubAlignment = getStubAlignment();
+ uintptr_t StubAddress =
+ (BaseAddress + Section.StubOffset + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ assert(((StubAddress % getStubAlignment()) == 0) &&
+ "GOT entry not aligned");
+ RelocationEntry GOTRE(RE.SectionID, StubOffset,
+ MachO::ARM64_RELOC_UNSIGNED, Value.Addend,
+ /*IsPCRel=*/false, /*Size=*/3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.StubOffset = StubOffset + getMaxStubSize();
+ Addr = (uint8_t *)StubAddress;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, /*Addend=*/0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOAARCH64_H
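
For reference, a minimal standalone sketch (not part of the patch; names are illustrative) of the two pieces of arithmetic the AArch64 resolver above relies on: sign-extending the 24-bit ARM64_RELOC_ADDEND payload, and computing the 4 KiB page delta encoded by an ADRP (ARM64_RELOC_PAGE21) fixup.

#include <cassert>
#include <cstdint>

// Sign-extend the 24-bit raw addend carried by ARM64_RELOC_ADDEND to 64 bits.
// The left shift is done on an unsigned value to avoid signed overflow; the
// arithmetic right shift then replicates the sign bit.
static int64_t signExtend24(uint64_t RawAddend) {
  return (int64_t)(RawAddend << 40) >> 40;
}

// Page delta written by an ARM64_RELOC_PAGE21 fixup: both the target (plus
// addend) and the fixup address are truncated to their 4 KiB page first.
static int64_t page21Delta(uint64_t Target, int64_t Addend, uint64_t FixupAddr) {
  return (int64_t)(((Target + Addend) & ~UINT64_C(0xFFF)) -
                   (FixupAddr & ~UINT64_C(0xFFF)));
}

int main() {
  assert(signExtend24(0xFFFFFF) == -1);   // all-ones 24-bit value is -1
  assert(signExtend24(0x000010) == 16);   // small positive values pass through
  assert(page21Delta(0x140002010, 0, 0x140000FFC) == 0x2000);
  return 0;
}
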
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
new file mode 100644
index 000000000000..1de994219824
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -0,0 +1,154 @@
+//===----- RuntimeDyldMachOARM.h ---- MachO/ARM specific code. ----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOARM_H
+#define LLVM_RUNTIMEDYLDMACHOARM_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOARM
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> {
+public:
+ RuntimeDyldMachOARM(RTDyldMemoryManager *MM) : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 4; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ if (Obj.isRelocationScattered(RelInfo))
+ return ++++RelI; // not handled; skip the scattered relocation and its paired entry
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ if ((RE.RelType & 0xf) == MachO::ARM_RELOC_BR24)
+ processBranchRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress;
+ // ARM PCRel relocations have an effective-PC offset of two instructions
+ // (4 bytes in Thumb mode, 8 bytes in ARM mode).
+ // FIXME: For now, assume ARM mode.
+ Value -= 8;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM_RELOC_VANILLA:
+ writeBytesUnaligned(LocalAddress, Value, 1 << RE.Size);
+ break;
+ case MachO::ARM_RELOC_BR24: {
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ uint32_t *p = (uint32_t *)LocalAddress;
+ // The low two bits of the value are not encoded.
+ Value >>= 2;
+ // Mask the value to 24 bits.
+ uint64_t FinalValue = Value & 0xffffff;
+ // Check for overflow.
+ if (Value != FinalValue) {
+ Error("ARM BR24 relocation out of range.");
+ return;
+ }
+ // FIXME: If the destination is a Thumb function (and the instruction
+ // is a non-predicated BL instruction), we need to change it to a BLX
+ // instruction instead.
+
+ // Insert the value into the instruction.
+ *p = (*p & ~0xffffff) | FinalValue;
+ break;
+ }
+ case MachO::ARM_THUMB_RELOC_BR22:
+ case MachO::ARM_THUMB_32BIT_BRANCH:
+ case MachO::ARM_RELOC_HALF:
+ case MachO::ARM_RELOC_HALF_SECTDIFF:
+ case MachO::ARM_RELOC_PAIR:
+ case MachO::ARM_RELOC_SECTDIFF:
+ case MachO::ARM_RELOC_LOCAL_SECTDIFF:
+ case MachO::ARM_RELOC_PB_LA_PTR:
+ Error("Relocation type not implemented yet!");
+ return;
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processBranchRelocation(const RelocationEntry &RE,
+ const RelocationValueRef &Value,
+ StubMap &Stubs) {
+ // This is an ARM branch relocation, need to use a stub function.
+ // Look up for existing stub.
+ SectionEntry &Section = Sections[RE.SectionID];
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.Address + i->second;
+ } else {
+ // Create a new stub function.
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *StubTargetAddr =
+ createStubFunction(Section.Address + Section.StubOffset);
+ RelocationEntry StubRE(RE.SectionID, StubTargetAddr - Section.Address,
+ MachO::GENERIC_RELOC_VANILLA, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(StubRE, Value.SymbolName);
+ else
+ addRelocationForSection(StubRE, Value.SectionID);
+ Addr = Section.Address + Section.StubOffset;
+ Section.StubOffset += getMaxStubSize();
+ }
+ RelocationEntry TargetRE(Value.SectionID, RE.Offset, RE.RelType, 0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOARM_H
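
A small self-contained sketch (illustrative only, not from the patch) of the ARM_RELOC_BR24 math used above: PC-relative delta, ARM-mode pipeline offset of 8 bytes, word scaling, and a 24-bit range check before the immediate is masked into the instruction.

#include <cassert>
#include <cstdint>

// Encode a BR24 fixup into a 32-bit ARM branch instruction, mirroring the
// resolver above. Like that code, the range check only accepts deltas that
// fit in the 24-bit immediate as an unsigned word count.
static bool encodeBR24(uint32_t &Insn, uint64_t Target, uint64_t FixupAddr) {
  uint64_t Value = Target - FixupAddr; // PC-relative delta
  Value -= 8;                          // effective PC is two ARM instructions ahead
  Value >>= 2;                         // branch offsets are word-scaled
  if ((Value & 0xffffff) != Value)     // out of range for 24 bits
    return false;
  Insn = (Insn & ~0xffffffu) | (uint32_t)Value;
  return true;
}

int main() {
  uint32_t BL = 0xEB000000;            // BL with a zero immediate
  assert(encodeBR24(BL, /*Target=*/0x1040, /*FixupAddr=*/0x1000));
  assert((BL & 0xffffff) == (0x40 - 8) / 4);
  return 0;
}
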
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
new file mode 100644
index 000000000000..856c6ca3035c
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -0,0 +1,315 @@
+//===---- RuntimeDyldMachOI386.h ---- MachO/I386 specific code. ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOI386_H
+#define LLVM_RUNTIMEDYLDMACHOI386_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOI386
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOI386> {
+public:
+ RuntimeDyldMachOI386(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 0; }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::GENERIC_RELOC_SECTDIFF ||
+ RelType == MachO::GENERIC_RELOC_LOCAL_SECTDIFF)
+ return processSECTDIFFRelocation(SectionID, RelI, ObjImg,
+ ObjSectionToID);
+ else if (Arch == Triple::x86 && RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processI386ScatteredVANILLA(SectionID, RelI, ObjImg,
+ ObjSectionToID);
+ llvm_unreachable("Unhandled scattered relocation.");
+ }
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ // Addends for external, PC-rel relocations on i386 point back to the zero
+ // offset. Calculate the final offset from the relocation target instead.
+ // This allows us to use the same logic for both external and internal
+ // relocations in resolveI386RelocationRef.
+ // bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ // if (IsExtern && RE.IsPCRel) {
+ // uint64_t RelocAddr = 0;
+ // RelI->getAddress(RelocAddr);
+ // Value.Addend += RelocAddr + 4;
+ // }
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ RE.Addend = Value.Addend;
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress + 4; // see MachOX86_64::resolveRelocation.
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::GENERIC_RELOC_VANILLA:
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ case MachO::GENERIC_RELOC_SECTDIFF:
+ case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].LoadAddress;
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].LoadAddress;
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(LocalAddress, Value, 1 << RE.Size);
+ break;
+ }
+ case MachO::GENERIC_RELOC_PB_LA_PTR:
+ Error("Relocation type not implemented yet!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ Section.getName(Name);
+
+ if (Name == "__jump_table")
+ populateJumpTable(cast<MachOObjectFile>(*ObjImg.getObjectFile()), Section,
+ SectionID);
+ else if (Name == "__pointers")
+ populatePointersSection(cast<MachOObjectFile>(*ObjImg.getObjectFile()),
+ Section, SectionID);
+ }
+
+private:
+ relocation_iterator
+ processSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &Obj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile *MachO =
+ static_cast<const MachOObjectFile *>(Obj.getObjectFile());
+ MachO::any_relocation_info RE =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO->getAnyRelocationType(RE);
+ bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
+ unsigned Size = MachO->getAnyRelocationLength(RE);
+ uint64_t Offset;
+ RelI->getOffset(Offset);
+ uint8_t *LocalAddress = Section.Address + Offset;
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend = 0;
+ memcpy(&Addend, LocalAddress, NumBytes);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ uint32_t AddrA = MachO->getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(*MachO, AddrA);
+ assert(SAI != MachO->section_end() && "Can't find section for address A");
+ uint64_t SectionABase;
+ SAI->getAddress(SectionABase);
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode;
+ SectionA.isText(IsCode);
+ uint32_t SectionAID =
+ findOrEmitSection(Obj, SectionA, IsCode, ObjSectionToID);
+
+ uint32_t AddrB = MachO->getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(*MachO, AddrB);
+ assert(SBI != MachO->section_end() && "Can't find section for address B");
+ uint64_t SectionBBase;
+ SBI->getAddress(SectionBBase);
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID =
+ findOrEmitSection(Obj, SectionB, IsCode, ObjSectionToID);
+
+ if (Addend != AddrA - AddrB)
+ Error("Unexpected SECTDIFF relocation addend.");
+
+ DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA << ", AddrB: " << AddrB
+ << ", Addend: " << Addend << ", SectionA ID: " << SectionAID
+ << ", SectionAOffset: " << SectionAOffset
+ << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, 0, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
+ Size);
+
+ addRelocationForSection(R, SectionAID);
+ addRelocationForSection(R, SectionBID);
+
+ return ++RelI;
+ }
+
+ relocation_iterator processI386ScatteredVANILLA(
+ unsigned SectionID, relocation_iterator RelI, ObjectImage &Obj,
+ RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile *MachO =
+ static_cast<const MachOObjectFile *>(Obj.getObjectFile());
+ MachO::any_relocation_info RE =
+ MachO->getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO->getAnyRelocationType(RE);
+ bool IsPCRel = MachO->getAnyRelocationPCRel(RE);
+ unsigned Size = MachO->getAnyRelocationLength(RE);
+ uint64_t Offset;
+ RelI->getOffset(Offset);
+ uint8_t *LocalAddress = Section.Address + Offset;
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend = 0;
+ memcpy(&Addend, LocalAddress, NumBytes);
+
+ unsigned SymbolBaseAddr = MachO->getScatteredRelocationValue(RE);
+ section_iterator TargetSI = getSectionByAddress(*MachO, SymbolBaseAddr);
+ assert(TargetSI != MachO->section_end() && "Can't find section for symbol");
+ uint64_t SectionBaseAddr;
+ TargetSI->getAddress(SectionBaseAddr);
+ SectionRef TargetSection = *TargetSI;
+ bool IsCode;
+ TargetSection.isText(IsCode);
+ uint32_t TargetSectionID =
+ findOrEmitSection(Obj, TargetSection, IsCode, ObjSectionToID);
+
+ Addend -= SectionBaseAddr;
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, IsPCRel, Size);
+
+ addRelocationForSection(R, TargetSectionID);
+
+ return ++RelI;
+ }
+
+ // Populate stubs in __jump_table section.
+ void populateJumpTable(MachOObjectFile &Obj, const SectionRef &JTSection,
+ unsigned JTSectionID) {
+ assert(!Obj.is64Bit() &&
+ "__jump_table section not supported in 64-bit MachO.");
+
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(JTSection.getRawDataRefImpl());
+ uint32_t JTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ unsigned JTEntrySize = Sec32.reserved2;
+ unsigned NumJTEntries = JTSectionSize / JTEntrySize;
+ uint8_t *JTSectionAddr = getSectionAddress(JTSectionID);
+ unsigned JTEntryOffset = 0;
+
+ assert((JTSectionSize % JTEntrySize) == 0 &&
+ "Jump-table section does not contain a whole number of stubs?");
+
+ for (unsigned i = 0; i < NumJTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ StringRef IndirectSymbolName;
+ SI->getName(IndirectSymbolName);
+ uint8_t *JTEntryAddr = JTSectionAddr + JTEntryOffset;
+ createStubFunction(JTEntryAddr);
+ RelocationEntry RE(JTSectionID, JTEntryOffset + 1,
+ MachO::GENERIC_RELOC_VANILLA, 0, true, 2);
+ addRelocationForSymbol(RE, IndirectSymbolName);
+ JTEntryOffset += JTEntrySize;
+ }
+ }
+
+ // Populate __pointers section.
+ void populatePointersSection(MachOObjectFile &Obj,
+ const SectionRef &PTSection,
+ unsigned PTSectionID) {
+ assert(!Obj.is64Bit() &&
+ "__pointers section not supported in 64-bit MachO.");
+
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(PTSection.getRawDataRefImpl());
+ uint32_t PTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ const unsigned PTEntrySize = 4;
+ unsigned NumPTEntries = PTSectionSize / PTEntrySize;
+ unsigned PTEntryOffset = 0;
+
+ assert((PTSectionSize % PTEntrySize) == 0 &&
+ "Pointers section does not contain a whole number of stubs?");
+
+ DEBUG(dbgs() << "Populating __pointers, Section ID " << PTSectionID << ", "
+ << NumPTEntries << " entries, " << PTEntrySize
+ << " bytes each:\n");
+
+ for (unsigned i = 0; i < NumPTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ StringRef IndirectSymbolName;
+ SI->getName(IndirectSymbolName);
+ DEBUG(dbgs() << " " << IndirectSymbolName << ": index " << SymbolIndex
+ << ", PT offset: " << PTEntryOffset << "\n");
+ RelocationEntry RE(PTSectionID, PTEntryOffset,
+ MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
+ addRelocationForSymbol(RE, IndirectSymbolName);
+ PTEntryOffset += PTEntrySize;
+ }
+ }
+
+ static section_iterator getSectionByAddress(const MachOObjectFile &Obj,
+ uint64_t Addr) {
+ section_iterator SI = Obj.section_begin();
+ section_iterator SE = Obj.section_end();
+
+ for (; SI != SE; ++SI) {
+ uint64_t SAddr, SSize;
+ SI->getAddress(SAddr);
+ SI->getSize(SSize);
+ if ((Addr >= SAddr) && (Addr < SAddr + SSize))
+ return SI;
+ }
+
+ return SE;
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOI386_H
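
For reference, a compact sketch (illustrative only) of how the SECTDIFF values above are materialized: the bytes at the fixup location end up holding SectionA's load address minus SectionB's, plus the addend captured at processRelocationRef time.

#include <cassert>
#include <cstdint>
#include <cstring>

// Write a resolved SECTDIFF value at the fixup location. NumBytes is
// 1 << Size from the relocation entry; the memcpy assumes a little-endian
// host, as the unaligned writes in the resolver above effectively do.
static void resolveSectDiff(uint8_t *Fixup, unsigned NumBytes,
                            uint64_t SectionALoadAddr,
                            uint64_t SectionBLoadAddr, int64_t Addend) {
  uint64_t Value = SectionALoadAddr - SectionBLoadAddr + Addend;
  std::memcpy(Fixup, &Value, NumBytes);
}

int main() {
  uint8_t Word[4] = {0, 0, 0, 0};
  resolveSectDiff(Word, sizeof(Word), /*SectionA=*/0x5000, /*SectionB=*/0x4000,
                  /*Addend=*/8);
  uint32_t Out;
  std::memcpy(&Out, Word, sizeof(Out));
  assert(Out == 0x1008);
  return 0;
}
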
diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
new file mode 100644
index 000000000000..99efe9da4486
--- /dev/null
+++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -0,0 +1,132 @@
+//===-- RuntimeDyldMachOX86_64.h ---- MachO/X86_64 specific code. -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIMEDYLDMACHOX86_64_H
+#define LLVM_RUNTIMEDYLDMACHOX86_64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOX86_64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOX86_64> {
+public:
+ RuntimeDyldMachOX86_64(RTDyldMemoryManager *MM)
+ : RuntimeDyldMachOCRTPBase(MM) {}
+
+ unsigned getMaxStubSize() override { return 8; }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ relocation_iterator
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ ObjectImage &ObjImg, ObjSectionToIDMap &ObjSectionToID,
+ const SymbolTableMap &Symbols, StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(*ObjImg.getObjectFile());
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ assert(!Obj.isRelocationScattered(RelInfo) &&
+ "Scattered relocations not supported on X86_64");
+
+ RelocationEntry RE(getBasicRelocationEntry(SectionID, ObjImg, RelI));
+ RelocationValueRef Value(
+ getRelocationValueRef(ObjImg, RelI, RE, ObjSectionToID, Symbols));
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, ObjImg, RelI);
+
+ if (RE.RelType == MachO::X86_64_RELOC_GOT ||
+ RE.RelType == MachO::X86_64_RELOC_GOT_LOAD)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) {
+ DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.Address + RE.Offset;
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ // FIXME: It seems this value needs to be adjusted by 4 for an effective
+ // PC address. Is that expected? Only for branches, perhaps?
+ uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
+ Value -= FinalAddress + 4;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::X86_64_RELOC_SIGNED_1:
+ case MachO::X86_64_RELOC_SIGNED_2:
+ case MachO::X86_64_RELOC_SIGNED_4:
+ case MachO::X86_64_RELOC_SIGNED:
+ case MachO::X86_64_RELOC_UNSIGNED:
+ case MachO::X86_64_RELOC_BRANCH:
+ writeBytesUnaligned(LocalAddress, Value + RE.Addend, 1 << RE.Size);
+ break;
+ case MachO::X86_64_RELOC_GOT_LOAD:
+ case MachO::X86_64_RELOC_GOT:
+ case MachO::X86_64_RELOC_SUBTRACTOR:
+ case MachO::X86_64_RELOC_TLV:
+ Error("Relocation type not implemented yet!");
+ }
+ }
+
+ void finalizeSection(ObjectImage &ObjImg, unsigned SectionID,
+ const SectionRef &Section) {}
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ SectionEntry &Section = Sections[RE.SectionID];
+ assert(RE.IsPCRel);
+ assert(RE.Size == 2);
+ Value.Addend -= RE.Addend;
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.Address + i->second;
+ } else {
+ Stubs[Value] = Section.StubOffset;
+ uint8_t *GOTEntry = Section.Address + Section.StubOffset;
+ RelocationEntry GOTRE(RE.SectionID, Section.StubOffset,
+ MachO::X86_64_RELOC_UNSIGNED, Value.Addend, false,
+ 3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.StubOffset += 8;
+ Addr = GOTEntry;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset,
+ MachO::X86_64_RELOC_UNSIGNED, RE.Addend, true, 2);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_RUNTIMEDYLDMACHOX86_64_H
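
Finally, a one-function sketch (illustrative only) of the PC-relative adjustment the X86_64 resolver applies above: rip-relative fixups are measured from the end of the 4-byte displacement field, hence the FinalAddress + 4 term.

#include <cassert>
#include <cstdint>

// 32-bit rip-relative displacement for a fixup whose 4-byte field starts at
// FixupAddr; the CPU adds the stored value to the address of the next
// instruction, i.e. FixupAddr + 4.
static int32_t pcrel32(uint64_t Target, int64_t Addend, uint64_t FixupAddr) {
  return (int32_t)(Target + Addend - (FixupAddr + 4));
}

int main() {
  // A call whose displacement field sits at 0x1000 and whose target is
  // 0x1100 encodes 0x1100 - 0x1004 = 0xFC.
  assert(pcrel32(0x1100, 0, 0x1000) == 0xFC);
  // A backward reference produces a negative displacement.
  assert(pcrel32(0x0F00, 0, 0x1000) == -0x104);
  return 0;
}
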